DPDK patches and discussions
* [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support
@ 2021-09-09 14:24 Radu Nicolau
  2021-09-09 14:24 ` [dpdk-dev] [PATCH 1/4] common/iavf: " Radu Nicolau
                   ` (16 more replies)
  0 siblings, 17 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-09 14:24 UTC (permalink / raw)
  Cc: dev, declan.doherty, abhijit.sinha, jingjing.wu, qi.z.zhang,
	beilei.xing, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as the offload of ESP over UDP,
in conjunction with TSO for UDP and TCP flows.
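
For reference, the offload is driven through the standard rte_security
API. Below is a minimal, illustrative sketch (not part of this series)
of how an application could describe an outbound ESP tunnel SA for
inline crypto; the key material, SPI and addresses are placeholders.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_security.h>
#include <rte_cryptodev.h>

/* Placeholder AES-GCM key - illustration only */
static uint8_t gcm_key[16];

static int
describe_outbound_tunnel_sa(uint16_t port_id,
			    struct rte_security_session_conf *conf)
{
	/* AEAD transform describing the SA crypto parameters */
	static struct rte_crypto_sym_xform aead = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = gcm_key, .length = sizeof(gcm_key) },
			.iv = { .offset = 0, .length = 8 },
			.digest_length = 16,
		},
	};

	if (rte_eth_dev_get_sec_ctx(port_id) == NULL)
		return -1; /* port does not expose a security context */

	memset(conf, 0, sizeof(*conf));
	conf->action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
	conf->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
	conf->ipsec.spi = 0x1000; /* placeholder */
	conf->ipsec.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	conf->ipsec.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	conf->ipsec.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	conf->ipsec.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
	conf->ipsec.tunnel.ipv4.src_ip.s_addr =
		rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1));
	conf->ipsec.tunnel.ipv4.dst_ip.s_addr =
		rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 2));
	conf->crypto_xform = &aead;

	/* conf is then passed to rte_security_session_create() together
	 * with the port's security context and a session mempool.
	 */
	return 0;
}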

Radu Nicolau (4):
  common/iavf: add iAVF IPsec inline crypto support
  net/iavf: add iAVF IPsec inline crypto support
  net/iavf: Add xstats support for inline IPsec crypto
  net/iavf: add watchdog for VFLR

 drivers/common/iavf/iavf_type.h               |  215 +-
 drivers/common/iavf/virtchnl.h                |   17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
 drivers/net/iavf/iavf.h                       |   53 +-
 drivers/net/iavf/iavf_ethdev.c                |  222 +-
 drivers/net/iavf/iavf_generic_flow.c          |   11 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1921 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |   96 +
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  803 +++++--
 drivers/net/iavf/iavf_rxtx.h                  |  567 ++++-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  166 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 16 files changed, 4684 insertions(+), 339 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

-- 
2.25.1



* [dpdk-dev] [PATCH 1/4] common/iavf: add iAVF IPsec inline crypto support
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-09-09 14:24 ` Radu Nicolau
  2021-09-09 14:24 ` [dpdk-dev] [PATCH 2/4] net/iavf: " Radu Nicolau
                   ` (15 subsequent siblings)
  16 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-09 14:24 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Add support for inline crypto for IPsec.
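
As a rough usage sketch (an assumption, not code from this patch): once
the PF advertises the new capability bit during resource negotiation,
the VF driver can gate the offload on it:

/* 'vf_res' is the virtchnl_vf_resource returned by the PF in response
 * to VIRTCHNL_OP_GET_VF_RESOURCES.
 */
static int
vf_has_inline_ipsec(const struct virtchnl_vf_resource *vf_res)
{
	return (vf_res->vf_cap_flags &
		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO) != 0;
}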

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/common/iavf/iavf_type.h             | 215 +++++++-
 drivers/common/iavf/virtchnl.h              |  17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h | 553 ++++++++++++++++++++
 3 files changed, 775 insertions(+), 10 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 73dfb47e70..1f8f8ae5fd 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -709,11 +709,29 @@ enum iavf_rx_prog_status_desc_error_bits {
 #define IAVF_FOUR_BIT_MASK	0xF
 #define IAVF_EIGHTEEN_BIT_MASK	0x3FFFF
 
-/* TX Descriptor */
+/* TX Data Descriptor */
 struct iavf_tx_desc {
-	__le64 buffer_addr; /* Address of descriptor's data buf */
-	__le64 cmd_type_offset_bsz;
-};
+	union {
+		struct {
+			__le64 buffer_addr; /* Addr of descriptor's data buf */
+			__le64 cmd_type_offset_bsz;
+		};
+		struct {
+			__le64 qw0; /**< data buffer address */
+			__le64 qw1; /**< dtyp, cmd, offset, buf_sz and l2tag1 */
+		};
+		struct {
+			__le64 buffer_addr;	/**< Data buffer address */
+			__le64 type:4;		/**< Descriptor type */
+			__le64 cmd:12;		/**< Command field */
+			__le64 offset_l2len:7;	/**< L2 header length */
+			__le64 offset_l3len:7;	/**< L3 header length */
+			__le64 offset_l4len:4;	/**< L4 header length */
+			__le64 buffer_sz:14;	/**< Data buffer size */
+			__le64 l2tag1:16;	/**< L2 Tag 1 value */
+		} debug __rte_packed;
+	};
+} __rte_packed;
 
 #define IAVF_TXD_QW1_DTYPE_SHIFT	0
 #define IAVF_TXD_QW1_DTYPE_MASK		(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
@@ -723,6 +741,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
@@ -734,7 +753,7 @@ enum iavf_tx_desc_dtype_value {
 #define IAVF_TXD_QW1_CMD_SHIFT	4
 #define IAVF_TXD_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_QW1_CMD_SHIFT)
 
-enum iavf_tx_desc_cmd_bits {
+enum iavf_tx_data_desc_cmd_bits {
 	IAVF_TX_DESC_CMD_EOP			= 0x0001,
 	IAVF_TX_DESC_CMD_RS			= 0x0002,
 	IAVF_TX_DESC_CMD_ICRC			= 0x0004,
@@ -778,18 +797,79 @@ enum iavf_tx_desc_length_fields {
 #define IAVF_TXD_QW1_L2TAG1_SHIFT	48
 #define IAVF_TXD_QW1_L2TAG1_MASK	(0xFFFFULL << IAVF_TXD_QW1_L2TAG1_SHIFT)
 
+#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
+#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
+#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
+#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
+	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
+	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT		(48)
+#define IAVF_TXD_DATA_QW1_L2TAG1_MASK		\
+	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
+
 /* Context descriptors */
 struct iavf_tx_context_desc {
+	union {
+		struct {
 	__le32 tunneling_params;
 	__le16 l2tag2;
 	__le16 rsvd;
 	__le64 type_cmd_tso_mss;
 };
-
-#define IAVF_TXD_CTX_QW1_DTYPE_SHIFT	0
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le32 tunneling;
+			__le16 l2tag2;
+			__le16 rsvd0;
+			__le64 type:4;
+			__le64 cmd:7;
+			__le64 ipsec:7;
+			__le64 rsvd1:12;
+			__le64 tlen_tsyn:18;
+			__le64 rsvd2:2;
+			__le64 mss_target_vsi:14;
+		} debug __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_TXD_CTX_QW1_DTYPE_SHIFT	(0)
 #define IAVF_TXD_CTX_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_CTX_QW1_DTYPE_SHIFT)
 
-#define IAVF_TXD_CTX_QW1_CMD_SHIFT	4
+#define IAVF_TXD_CTX_QW1_CMD_SHIFT	(4)
 #define IAVF_TXD_CTX_QW1_CMD_MASK	(0xFFFFUL << IAVF_TXD_CTX_QW1_CMD_SHIFT)
 
 enum iavf_tx_ctx_desc_cmd_bits {
@@ -804,6 +884,63 @@ enum iavf_tx_ctx_desc_cmd_bits {
 	IAVF_TX_CTX_DESC_SWPE		= 0x40
 };
 
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
+	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
+	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT		(30)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK		\
+	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK		\
+	(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT		(50)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK		\
+	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT		(0)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_external_ip_type {
+	IAVF_TX_CTX_DESC_EIPT_NONE,
+	IAVF_TX_CTX_DESC_EIPT_IPV6,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM			(32)
+#define IAVF_TXD_CTX_QW0_L2TAG2_MASK			(0xFFFFUL)
+
 struct iavf_nop_desc {
 	__le64 rsvd;
 	__le64 dtype_cmd;
@@ -911,6 +1048,68 @@ enum iavf_tx_ctx_desc_eipt_offload {
 #define IAVF_TXD_CTX_QW0_L4T_CS_SHIFT	23
 #define IAVF_TXD_CTX_QW0_L4T_CS_MASK	BIT_ULL(IAVF_TXD_CTX_QW0_L4T_CS_SHIFT)
 
+
+struct iavf_tx_ipsec_desc {
+	union {
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le16 l4payload_length;
+			__le32 esn;
+			__le16 trailer_length;
+			u8 type:4;
+			u8 rsv:1;
+			u8 udp:1;
+			u8 ivlen:2;
+			u8 next_header;
+			__le16 ipv6_ext_hdr_length;
+			__le32 said;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
+			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
+
+/* Initialization Vector Length type */
+enum iavf_ipsec_iv_len {
+	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
+	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
+	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
+	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
+};
+
 /* Statistics collected by each port, VSI, VEB, and S-channel */
 struct iavf_eth_stats {
 	u64 rx_bytes;			/* gorc */
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 1cf0866124..efb4cca197 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -38,6 +38,8 @@
  * value in current and future projects
  */
 
+#include "virtchnl_inline_ipsec.h"
+
 /* Error Codes */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
@@ -133,7 +135,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, and 37 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
 	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
 	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
@@ -226,6 +229,8 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 		return "VIRTCHNL_OP_DCF_CMD_DESC";
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
@@ -388,7 +393,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
-	/* BIT(8) is reserved */
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
@@ -2320,6 +2325,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..1e9134501e
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all fields are always valid; if a field is invalid, set all its bits to 1 */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* VF record of the crypto capabilities received from the PF via virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF passes virtchnl_ipsec_cap to the PF
+ * and the PF returns the IPsec capabilities over virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF sends this SA configuration to the PF using virtchnl;
+ * the PF creates the SA according to the configuration and the PF
+ * driver returns a unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logically separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF sends the configuration and index of an SA to the PF;
+ * the PF will update the SA according to the configuration.
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF sends the index configuration of the SA(s) to the PF;
+ * the PF will destroy the SA(s) according to the configuration.
+ * The flag bitmap indicates whether all SAs or only the selected
+ * SAs will be destroyed.
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF sends this SA configuration to the PF using virtchnl;
+ * the PF reads the SA and returns the configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logically separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
-- 
2.25.1



* [dpdk-dev] [PATCH 2/4] net/iavf: add iAVF IPsec inline crypto support
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-09-09 14:24 ` [dpdk-dev] [PATCH 1/4] common/iavf: " Radu Nicolau
@ 2021-09-09 14:24 ` Radu Nicolau
  2021-09-09 14:24 ` [dpdk-dev] [PATCH 3/4] net/iavf: Add xstats support for inline IPsec crypto Radu Nicolau
                   ` (14 subsequent siblings)
  16 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-09 14:24 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as the offload of ESP over UDP,
in conjunction with TSO for UDP and TCP flows.
Implement support for rte_security packet metadata.
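
For context, on the transmit side an application associates each
outbound packet with its inline crypto session roughly as follows
(illustrative sketch; the sec_ctx/sess variables are assumptions):

#include <rte_mbuf.h>
#include <rte_security.h>

/* Tag an outbound mbuf with the inline IPsec session so the PMD can
 * pick up the SA in its Tx path.
 */
static inline void
prepare_inline_ipsec_tx(struct rte_security_ctx *sec_ctx,
			struct rte_security_session *sess,
			struct rte_mbuf *m)
{
	rte_security_set_pkt_metadata(sec_ctx, sess, m, NULL);
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;
}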

Add definitions for the IPsec descriptors and extend the data and
context descriptors to support the offload.

Add support to the virtual channel mailbox for IPsec Crypto request
operations. IPsec Crypto requests receive an initial acknowledgement
of receipt from the physical function driver, followed by an
asynchronous response indicating the success/failure of the request,
including any response data.
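
As an illustration of that exchange (sketch only, relying on the
driver-internal iavf.h and virtchnl_inline_ipsec.h added in this
series), destroying a single SA would look roughly like:

static int
destroy_one_sa(struct iavf_adapter *adapter, uint32_t sa_index)
{
	uint8_t req_buf[sizeof(struct inline_ipsec_msg) +
			sizeof(struct virtchnl_ipsec_sa_destroy)] = { 0 };
	uint8_t resp_buf[sizeof(struct inline_ipsec_msg) +
			sizeof(struct virtchnl_ipsec_resp)] = { 0 };
	struct inline_ipsec_msg *req = (struct inline_ipsec_msg *)req_buf;
	struct virtchnl_ipsec_sa_destroy *sa = req->ipsec_data.sa_destroy;

	req->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
	req->req_id = 1;
	sa->flag = 0x1;			/* destroy only the first entry */
	sa->sa_index[0] = sa_index;

	/* The PF acknowledges receipt immediately; the success/failure
	 * response arrives asynchronously and is copied into resp_buf.
	 */
	return iavf_ipsec_crypto_request(adapter, req_buf, sizeof(req_buf),
					 resp_buf, sizeof(resp_buf));
}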

Add enhanced descriptor debugging

Refactor the scalar Tx burst function to support integration of the offload.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h                       |   26 +
 drivers/net/iavf/iavf_ethdev.c                |   41 +-
 drivers/net/iavf/iavf_generic_flow.c          |   11 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1921 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |   96 +
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  803 +++++--
 drivers/net/iavf/iavf_rxtx.h                  |  579 ++++-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  166 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 13 files changed, 3729 insertions(+), 313 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index b3bd078111..934ef48278 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -189,6 +189,7 @@ struct iavf_info {
 	uint64_t supported_rxdid;
 	uint8_t *proto_xtr; /* proto xtr type for all queues */
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+	rte_atomic32_t pend_cmd_count;
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
@@ -216,6 +217,7 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+	struct iavf_parser_list ipsec_crypto_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
@@ -238,6 +240,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_IPV6_FLOW,
 	IAVF_PROTO_XTR_TCP,
 	IAVF_PROTO_XTR_IP_OFFSET,
+	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
 	IAVF_PROTO_XTR_MAX,
 };
 
@@ -249,11 +252,14 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 };
 
+struct iavf_security_ctx;
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
 	struct rte_eth_dev *eth_dev;
 	struct iavf_info vf;
+	struct iavf_security_ctx *security_ctx;
 
 	bool rx_bulk_alloc_allowed;
 	/* For vector PMD */
@@ -272,6 +278,8 @@ struct iavf_adapter {
 	(&((struct iavf_adapter *)adapter)->vf)
 #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct iavf_adapter *)adapter)->hw)
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+	(((struct iavf_adapter *)adapter)->security_ctx)
 
 /* IAVF_VSI_TO */
 #define IAVF_VSI_TO_HW(vsi) \
@@ -340,9 +348,24 @@ _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
+	rte_atomic32_set(&vf->pend_cmd_count, 1);
+
 	return !ret;
 }
 
+/* Check there is pending cmd in execution. If none, set new command. */
+static inline int
+_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+	int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+	if (!ret)
+		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+	rte_atomic32_set(&vf->pend_cmd_count, 2);
+
+	return !ret;
+}
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
@@ -399,5 +422,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			uint16_t size);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 574cfe055e..d4f5d123e2 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -29,6 +29,7 @@
 #include "iavf_rxtx.h"
 #include "iavf_generic_flow.h"
 #include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
@@ -70,6 +71,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
 	[IAVF_PROTO_XTR_IP_OFFSET] = {
 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
 		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+		.param = {
+		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+		.ol_flag =
+			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
 };
 
 static int iavf_dev_configure(struct rte_eth_dev *dev);
@@ -921,6 +927,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	/* free the iAVF security device context and all related resources */
+	iavf_security_ctx_destroy(adapter);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -930,7 +939,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
@@ -973,6 +984,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
 
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+	}
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -1739,6 +1755,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
 		{ "tcp",       IAVF_PROTO_XTR_TCP       },
 		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
 	};
 	uint32_t i;
 
@@ -1747,8 +1764,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 			return xtr_type_map[i].type;
 	}
 
-	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
-		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+			"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
 
 	return -1;
 }
@@ -2341,6 +2358,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		return ret;
 	}
 
+	/** Check if the IPsec Crypto offload is supported and create
+	 *  security_ctx if it is.
+	 */
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		/* Initialize security_ctx only for primary process */
+		ret = iavf_security_ctx_create(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+			return ret;
+		}
+
+		ret = iavf_security_init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
+			return ret;
+		}
+	}
+
 	iavf_default_rss_disable(adapter);
 
 	return 0;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index 1fe270fb22..51896a5f4b 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1709,6 +1709,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
+		list = &vf->ipsec_crypto_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
@@ -2018,6 +2021,14 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 
 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
 				    actions, error);
+	if (*engine)
+		return 0;
+
+	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
+			pattern, actions, error);
+	if (*engine)
+		return 0;
+
 
 	if (!*engine) {
 		rte_flow_error_set(error, EINVAL,
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 4794d1fb80..a471c0331f 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -449,6 +449,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 /* engine types. */
 enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_NONE = 0,
+	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
 	IAVF_FLOW_ENGINE_MAX,
@@ -462,6 +463,7 @@ enum iavf_flow_engine_type {
  */
 enum iavf_flow_classification_stage {
 	IAVF_FLOW_STAGE_NONE = 0,
+	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
 	IAVF_FLOW_STAGE_RSS,
 	IAVF_FLOW_STAGE_DISTRIBUTOR,
 	IAVF_FLOW_STAGE_MAX,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
new file mode 100644
index 0000000000..a7607c5699
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -0,0 +1,1921 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+#include <rte_security_driver.h>
+#include <rte_security.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_log.h"
+#include "iavf_generic_flow.h"
+
+#include "iavf_ipsec_crypto.h"
+#include "iavf_ipsec_crypto_capabilities.h"
+
+/**
+ * iAVF IPsec Crypto Security Context
+ */
+struct iavf_security_ctx {
+	struct iavf_adapter *adapter;
+	int pkt_md_offset;
+	struct rte_cryptodev_capabilities *crypto_capabilities;
+};
+
+/**
+ * iAVF IPsec Crypto Security Session Parameters
+ */
+struct iavf_security_session {
+	struct iavf_adapter *adapter;
+
+	enum rte_security_ipsec_sa_mode mode;
+	enum rte_security_ipsec_tunnel_type type;
+	enum rte_security_ipsec_sa_direction direction;
+
+	struct {
+		uint32_t spi; /* Security Parameter Index */
+		uint32_t hw_idx; /* SA Index in hardware table */
+	} sa;
+
+	struct {
+		uint8_t enabled :1;
+		union {
+			uint64_t value;
+			struct {
+				uint32_t hi;
+				uint32_t low;
+			};
+		};
+	} esn;
+
+	struct {
+		uint8_t enabled :1;
+		uint16_t mss;
+	} tso;
+
+	struct {
+		uint8_t enabled :1;
+	} udp_encap;
+
+	size_t iv_sz;
+	size_t icv_sz;
+	size_t block_sz;
+
+	struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
+};
+/**
+ *  IV Length field in IPsec Tx Desc uses the following encoding:
+ *
+ *  0B - 0
+ *  4B - 1
+ *  8B - 2
+ *  16B - 3
+ *
+ * but we also need the IV length for TSO to correctly calculate the total
+ * header length, so it is also placed in the upper 6 bits for easier retrieval.
+ */
+static inline uint8_t
+calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
+{
+	uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
+
+	switch (iv_sz) {
+	case 4:
+		iv_length = IAVF_IPSEC_IV_LEN_DW;
+		break;
+	case 8:
+		iv_length = IAVF_IPSEC_IV_LEN_DDW;
+		break;
+	case 16:
+		iv_length = IAVF_IPSEC_IV_LEN_QDW;
+		break;
+	}
+
+	return (iv_sz << 2) | iv_length;
+}
+
+
+static unsigned int
+iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
+{
+	return sizeof(struct iavf_security_session);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_capability(struct iavf_security_ctx *iavf_sctx,
+	uint32_t algo, uint32_t type)
+{
+	const struct rte_cryptodev_capabilities *capability;
+	int i = 0;
+
+	capability = &iavf_sctx->crypto_capabilities[i];
+
+	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+			capability->sym.xform_type == type &&
+			capability->sym.cipher.algo == algo)
+			return &capability->sym;
+		/** try next capability */
+		capability = &iavf_crypto_capabilities[i++];
+	}
+
+	return NULL;
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_auth_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
+}
+static const struct rte_cryptodev_symmetric_capability *
+get_aead_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
+}
+
+static uint16_t
+get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->auth.block_size;
+}
+
+static uint8_t
+calc_context_desc_cipherblock_sz(size_t len)
+{
+	switch (len) {
+	case 8:
+		return 0x2;
+	case 16:
+		return 0x3;
+	default:
+		return 0x0;
+	}
+}
+
+static int
+valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
+{
+	if (len < min || len > max)
+		return 0;
+
+	if (increment == 0)
+		return 1;
+
+	if ((len - min) % increment)
+		return 0;
+
+	return 1;
+}
+
+static int
+valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_auth_xform *auth)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, auth->algo);
+	if (capability == NULL)
+		return 0;
+
+	/* verify key size */
+	if (!valid_length(auth->key.length,
+		capability->auth.key_size.min,
+		capability->auth.key_size.max,
+		capability->auth.key_size.increment))
+		return 0;
+
+	return 1;
+}
+
+static int
+valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_cipher_xform *cipher)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, cipher->algo);
+	if (capability == NULL)
+		return 0;
+
+	/* verify key size */
+	if (!valid_length(cipher->key.length,
+		capability->cipher.key_size.min,
+		capability->cipher.key_size.max,
+		capability->cipher.key_size.increment))
+		return 0;
+
+	return 1;
+}
+
+static int
+valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_aead_xform *aead)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, aead->algo);
+	if (capability == NULL)
+		return 0;
+
+	/* verify key size */
+	if (!valid_length(aead->key.length,
+		capability->aead.key_size.min,
+		capability->aead.key_size.max,
+		capability->aead.key_size.increment))
+		return 0;
+
+	return 1;
+}
+
+static int
+iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
+	struct rte_security_session_conf *conf)
+{
+	/** validate security action/protocol selection */
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
+		PMD_DRV_LOG(ERR, "Unsupported action / protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate IPsec protocol selection */
+	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		PMD_DRV_LOG(ERR, "Unsupported IPsec protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate selected options */
+	if (conf->ipsec.options.copy_dscp ||
+		conf->ipsec.options.copy_flabel ||
+		conf->ipsec.options.copy_df ||
+		conf->ipsec.options.dec_ttl ||
+		conf->ipsec.options.ecn ||
+		conf->ipsec.options.stats) {
+		PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
+		return -EINVAL;
+	}
+
+	/**
+	 * Validate crypto xforms parameters.
+	 *
+	 * AEAD transforms can be used for either inbound/outbound IPsec SAs,
+	 * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
+	 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
+	 */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
+			PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->cipher)) {
+			PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_auth_xform(iavf_sctx,
+				&conf->crypto_xform->next->auth)) {
+			PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
+			PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->next->cipher)) {
+			PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_aead_xform *aead, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AEAD;
+
+	switch (aead->algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		cfg->algo_type = VIRTCHNL_AES_CCM; break;
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		cfg->algo_type = VIRTCHNL_AES_GCM; break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
+	default:
+		RTE_ASSERT(!"unsupported AEAD algorithm");
+	}
+
+	cfg->key_len = aead->key.length;
+	cfg->iv_len = aead->iv.length;
+	cfg->digest_len = aead->digest_length;
+	cfg->salt = salt;
+
+	RTE_ASSERT(cfg->key_len <= sizeof(cfg->key_data));
+
+	memcpy(cfg->key_data, aead->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_cipher_xform *cipher, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_CIPHER;
+
+	switch (cipher->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cfg->algo_type = VIRTCHNL_AES_CBC; break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cfg->algo_type = VIRTCHNL_3DES_CBC; break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cfg->algo_type = VIRTCHNL_AES_CTR;
+		cfg->salt = salt;
+		break;
+	default:
+		RTE_ASSERT(!"unsupported cipher algorithm");
+	}
+
+	cfg->key_len = cipher->key.length;
+	cfg->iv_len = cipher->iv.length;
+	cfg->salt = salt;
+
+	RTE_ASSERT(cfg->key_len <= sizeof(cfg->key_data));
+
+	memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
+}
+
+
+static void
+sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_auth_xform *auth, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AUTH;
+
+	switch (auth->algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+		cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		cfg->algo_type = VIRTCHNL_AES_CMAC; break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		cfg->algo_type = VIRTCHNL_AES_GMAC;
+		cfg->salt = salt;
+		break;
+	default:
+		RTE_ASSERT(!"unsupported auth algorithm");
+	}
+
+	cfg->key_len = auth->key.length;
+	cfg->iv_len = auth->iv.length;
+	cfg->digest_len = auth->digest_length;
+
+	/* verify that the key length is within bounds of the key_data array */
+	RTE_ASSERT(cfg->key_len <= sizeof(cfg->key_data));
+
+	memcpy(cfg->key_data, auth->key.data, cfg->key_len);
+}
+
+/**
+ * Send SA add virtual channel request to Inline IPsec driver.
+ *
+ * The inline IPsec driver expects the SPI and destination IP address to be in
+ * host order, but the DPDK APIs use network order, so we need to do a htonl
+ * conversion of these parameters.
+ */
+static uint32_t
+iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
+	struct rte_security_session_conf *conf)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	struct virtchnl_ipsec_sa_cfg *sa_cfg;
+	size_t request_len, response_len;
+
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg);
+
+	request = rte_malloc("iavf-sad-add-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
+	response = rte_malloc("iavf-sad-add-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set SA configuration params */
+	sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
+
+	sa_cfg->spi = htonl(conf->ipsec.spi);
+	sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
+	sa_cfg->virtchnl_direction =
+		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+			VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
+
+	if (conf->ipsec.options.esn) {
+		sa_cfg->esn_enabled = 1;
+		sa_cfg->esn_hi = conf->ipsec.esn.hi;
+		sa_cfg->esn_low = conf->ipsec.esn.low;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sa_cfg->udp_encap_enabled = 1;
+
+	/* Set outer IP params */
+	if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
+
+		*((uint32_t *)sa_cfg->dst_addr)	=
+			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
+	} else {
+		uint32_t *v6_dst_addr =
+			conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
+
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
+
+		((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
+		((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
+		((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
+		((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
+	}
+
+	/* set crypto params */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->aead, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->cipher, conf->ipsec.salt);
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->auth, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->auth, conf->ipsec.salt);
+		if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
+			sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->cipher, conf->ipsec.salt);
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sa_cfg_resp->sa_handle;
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static void
+set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
+	struct iavf_security_session *sess)
+{
+	template->sa_idx = sess->sa.hw_idx;
+
+	if (sess->udp_encap.enabled)
+		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
+
+	if (sess->esn.enabled)
+		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
+
+	template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
+	template->ctx_desc_ipsec_params =
+			calc_context_desc_cipherblock_sz(sess->block_sz) |
+			((uint8_t)(sess->icv_sz >> 2) << 3);
+}
+
+static void
+set_session_parameter(struct iavf_security_ctx *iavf_sctx,
+	struct iavf_security_session *sess,
+	struct rte_security_session_conf *conf, uint32_t sa_idx)
+{
+	sess->adapter = iavf_sctx->adapter;
+
+	sess->mode = conf->ipsec.mode;
+	sess->direction = conf->ipsec.direction;
+
+	if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		sess->type = conf->ipsec.tunnel.type;
+
+	sess->sa.spi = conf->ipsec.spi;
+	sess->sa.hw_idx = sa_idx;
+
+	if (conf->ipsec.options.esn) {
+		sess->esn.enabled = 1;
+		sess->esn.value = conf->ipsec.esn.value;
+	}
+
+	if (conf->ipsec.options.tso) {
+		sess->tso.enabled = 1;
+		sess->tso.mss = conf->ipsec.mss;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sess->udp_encap.enabled = 1;
+
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sess->block_sz = get_aead_blocksize(iavf_sctx,
+			conf->crypto_xform->aead.algo);
+		sess->iv_sz = conf->crypto_xform->aead.iv.length;
+		sess->icv_sz = conf->crypto_xform->aead.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sess->block_sz = get_cipher_blocksize(iavf_sctx,
+			conf->crypto_xform->cipher.algo);
+		sess->iv_sz = conf->crypto_xform->cipher.iv.length;
+		sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
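+		/* AES-GMAC is authentication-only; others pair with a cipher */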
+		if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			sess->block_sz = get_auth_blocksize(iavf_sctx,
+				RTE_CRYPTO_SYM_XFORM_AUTH);
+			sess->iv_sz = conf->crypto_xform->auth.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		} else {
+			sess->block_sz = get_cipher_blocksize(iavf_sctx,
+				conf->crypto_xform->next->cipher.algo);
+			sess->iv_sz =
+				conf->crypto_xform->next->cipher.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		}
+	}
+
+	set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
+}
+
+/**
+ * Create IPsec Security Association for inline IPsec Crypto offload.
+ *
+ * 1. validate session configuration parameters
+ * 2. allocate session memory from mempool
+ * 3. add SA to hardware database
+ * 4. set session parameters
+ * 5. create packet metadata template for datapath
+ */
+static int
+iavf_ipsec_crypto_session_create(void *device,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *session,
+				 struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_session = NULL;
+	int sa_idx;
+	int ret = 0;
+
+	/* validate that all SA parameters are valid for device */
+	ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
+	if (ret)
+		return ret;
+
+	/* allocate session context */
+	if (rte_mempool_get(mempool, (void **)&iavf_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
+		return -ENOMEM;
+	}
+
+	/* add SA to hardware database */
+	sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
+	if (sa_idx < 0) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add SA (spi: %d, mode: %s, direction: %s)",
+			conf->ipsec.spi,
+			conf->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
+				"transport" : "tunnel",
+			conf->ipsec.direction ==
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+				"inbound" : "outbound");
+
+		rte_mempool_put(mempool, iavf_session);
+		return -EFAULT;
+	}
+
+	/* save data plane required session parameters */
+	set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
+
+	/* save to security session private data */
+	set_sec_session_private_data(session, iavf_session);
+
+	return 0;
+}
+
+/**
+ * Check if the ipsec crypto action is valid.
+ * The SPI must be non-zero and the SPI in the session must match the SPI
+ * value passed into the function.
+ *
+ * returns: 0 if the session is invalid or the SPI value equals zero
+ * returns: 1 if valid
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_session *sess = session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(sess == NULL || sess->adapter != adapter))
+		return 0;
+
+	/* SPI value must be non-zero */
+	if (spi == 0)
+		return 0;
+	/* Session SPI must match flow SPI */
+	else if (sess->sa.spi == spi) {
+		return 1;
+		/**
+		 * TODO: We should add a way of tracking valid hw SA indices to
+		 * make validation less brittle
+		 */
+	}
+
+	return 0;
+}
+
+
+/**
+ * Send virtual channel security policy add request to IES driver.
+ *
+ * IES driver expects SPI and destination IP address to be in host
+ * order, but DPDK APIs are network order, therefore we need to do a htonl
+ * conversion of these parameters.
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg);
+	request = rte_malloc("iavf-inbound-security-policy-add-request",
+				request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
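+	/* req_id is echoed back by the PF and used to validate the response */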
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* ESP SPI */
+	request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
+
+	/* Destination IP  */
+	if (is_v4) {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
+		request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
+	} else {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+		request->ipsec_data.sp_cfg->dip[0] =
+				htonl(((uint32_t *)v6_dst_addr)[0]);
+		request->ipsec_data.sp_cfg->dip[1] =
+				htonl(((uint32_t *)v6_dst_addr)[1]);
+		request->ipsec_data.sp_cfg->dip[2] =
+				htonl(((uint32_t *)v6_dst_addr)[2]);
+		request->ipsec_data.sp_cfg->dip[3] =
+				htonl(((uint32_t *)v6_dst_addr)[3]);
+	}
+
+	request->ipsec_data.sp_cfg->drop = drop;
+
+	/** Traffic Class/Congestion Domain currently not supported */
+	request->ipsec_data.sp_cfg->set_tc = 0;
+	request->ipsec_data.sp_cfg->cgd = 0;
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
+	response = rte_malloc("iavf-inbound-security-policy-add-response",
+				response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sp_cfg_resp->rule_id;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_update);
+	request = rte_malloc("iavf-sa-update-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sa-update-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set request params */
+	request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
+	request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_resp->resp;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_update(void *device,
+		struct rte_security_session *session,
+		struct rte_security_session_conf *conf)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int rc = 0;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* update esn hi 32-bits */
+	if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
+		/**
+		 * Update ESN in hardware for inbound SA. Store in
+		 * iavf_security_session for outbound SA for use
+		 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
+		 */
+		if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+			rc = iavf_ipsec_crypto_sa_update_esn(adapter,
+					iavf_sess);
+		else
+			iavf_sess->esn.hi = conf->ipsec.esn.hi;
+	}
+
+	/* update TSO MSS size */
+	if (iavf_sess->tso.enabled && conf->ipsec.options.tso)
+		iavf_sess->tso.mss = conf->ipsec.mss;
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
+		struct rte_security_session *session __rte_unused,
+		struct rte_security_stats *stats __rte_unused)
+{
+	return -EOPNOTSUPP;
+}
+
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_destroy);
+	request = rte_malloc("iavf-sp-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sp-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set security policy params */
+	request->ipsec_data.sp_destroy->table_id = is_v4 ?
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+	request->ipsec_data.sp_destroy->rule_id = flow_id;
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		return response->ipsec_data.ipsec_status->status;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_destroy);
+
+	request = rte_malloc("iavf-sa-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+
+	response = rte_malloc("iavf-sa-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/**
+	 * SA delete supports deletion of 1-8 specified SAs; if the flag
+	 * field is zero, all SAs associated with the VF will be deleted.
+	 */
+	if (sess) {
+		request->ipsec_data.sa_destroy->flag = 0x1;
+		request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
+	} else {
+		request->ipsec_data.sa_destroy->flag = 0x0;
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+
+	/**
+	 * The delete status will be the same bitmask as the sa_destroy request
+	 * flag if the deletion was successful.
+	 */
+	if (request->ipsec_data.sa_destroy->flag !=
+			response->ipsec_data.ipsec_status->status)
+		rc = -EFAULT;
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+
+static int
+iavf_ipsec_crypto_session_destroy(void *device,
+		struct rte_security_session *session)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int ret;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
+	rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
+	return ret;
+}
+
+/**
+ * Get the ESP trailer from the packet and calculate the total ESP trailer
+ * length, which includes the padding, the ESP trailer footer and the ICV.
+ */
+static inline struct rte_esp_tail *
+iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t *esp_trailer_length)
+{
+	struct rte_esp_tail *esp_trailer;
+
+	uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
+	uint16_t offset = 0;
+
+	/**
+	 * The ICV will not be present in TSO packets as this is appended by
+	 * hardware during segment generation
+	 */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		length -= s->icv_sz;
+
+	*esp_trailer_length = length;
+
+	/**
+	 * Calculate the offset in the packet to the ESP trailer header; this
+	 * is the total packet length less the size of the ESP trailer plus
+	 * the ICV length, if present.
+	 */
+	offset = rte_pktmbuf_pkt_len(m) - length;
+
+	if (m->nb_segs > 1) {
+		/* find segment which esp trailer is located */
+		while (m->data_len < offset) {
+			offset -= m->data_len;
+			m = m->next;
+		}
+	}
+
+	esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
+
+	*esp_trailer_length += esp_trailer->pad_len;
+
+	return esp_trailer;
+}
+
+
+static inline uint16_t
+iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t esp_tlen)
+{
+	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
+	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
+	uint16_t ol4_len = 0;		/* UDP NATT */
+	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
+	uint16_t l4_len = 0;		/* TCP/UDP/STCP hdrs */
+	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
+
+	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ol3_len = m->outer_l3_len;
+		/**<
+		 * application provided l3len assumed to include length of
+		 * ipv4/6 hdr + ext hdrs
+		 */
+
+	if (s->udp_encap.enabled)
+		ol4_len = sizeof(struct rte_udp_hdr);
+
+	l3_len = m->l3_len;
+	l4_len = m->l4_len;
+
+	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
+			esp_hlen + l3_len + l4_len + esp_tlen);
+}
+
+
+static int
+iavf_ipsec_crypto_pkt_metadata_set(void *device,
+			 struct rte_security_session *session,
+			 struct rte_mbuf *m, void *params)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_sess = session->sess_private_data;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
+	struct rte_esp_tail *esp_tail;
+	uint64_t *sqn = params;
+	uint16_t esp_trailer_length;
+
+	/* Check that the session is valid and belongs to this device */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* Get dynamic metadata location from mbuf */
+	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
+		struct iavf_ipsec_crypto_pkt_metadata *);
+
+	/* Set immutable metadata values from session template */
+	memcpy(md, &iavf_sess->pkt_metadata_template,
+		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
+
+	esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
+			&esp_trailer_length);
+
+	/* Set per packet mutable metadata values */
+	md->esp_trailer_len = esp_trailer_length;
+	md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
+				iavf_sess, esp_trailer_length);
+	md->next_proto = esp_tail->next_proto;
+
+	/* If Extended SN in use set the upper 32-bits in metadata */
+	if (iavf_sess->esn.enabled && sqn != NULL)
+		md->esn = (uint32_t)(*sqn >> 32);
+
+	return 0;
+}
+
+static int
+iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
+		struct virtchnl_ipsec_cap *capability)
+{
+	/* Perform pf-vf comms */
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg);
+
+	request = rte_malloc("iavf-device-capability-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_cap);
+	response = rte_malloc("iavf-device-capability-response",
+			response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id){
+		rc = -EFAULT;
+		goto update_cleanup;
+	}
+	memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+
+enum rte_crypto_auth_algorithm auth_maptbl[] = {
+	/* Hash Algorithm */
+	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
+	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
+	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
+	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
+	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
+	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
+	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
+	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
+	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
+	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+};
+
+static void
+update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
+		struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	capability->auth.algo = auth_maptbl[acap->algo_type];
+	capability->auth.block_size = acap->block_size;
+
+	capability->auth.key_size.min = acap->min_key_size;
+	capability->auth.key_size.max = acap->max_key_size;
+	capability->auth.key_size.increment = acap->inc_key_size;
+
+	capability->auth.digest_size.min = acap->min_digest_size;
+	capability->auth.digest_size.max = acap->max_digest_size;
+	capability->auth.digest_size.increment = acap->inc_digest_size;
+}
+
+enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
+	/* Cipher Algorithm */
+	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
+	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
+	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
+	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
+};
+
+
+static void
+update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+	capability->cipher.algo = cipher_maptbl[acap->algo_type];
+
+	capability->cipher.block_size = acap->block_size;
+
+	capability->cipher.key_size.min = acap->min_key_size;
+	capability->cipher.key_size.max = acap->max_key_size;
+	capability->cipher.key_size.increment = acap->inc_key_size;
+
+	capability->cipher.iv_size.min = acap->min_iv_size;
+	capability->cipher.iv_size.max = acap->max_iv_size;
+	capability->cipher.iv_size.increment = acap->inc_iv_size;
+}
+
+enum rte_crypto_aead_algorithm aead_maptbl[] = {
+	/* AEAD Algorithm */
+	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
+	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
+	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+};
+
+static void
+update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	capability->aead.algo = aead_maptbl[acap->algo_type];
+
+	capability->aead.block_size = acap->block_size;
+
+	capability->aead.key_size.min = acap->min_key_size;
+	capability->aead.key_size.max = acap->max_key_size;
+	capability->aead.key_size.increment = acap->inc_key_size;
+
+	capability->aead.aad_size.min = acap->min_aad_size;
+	capability->aead.aad_size.max = acap->max_aad_size;
+	capability->aead.aad_size.increment = acap->inc_aad_size;
+
+	capability->aead.iv_size.min = acap->min_iv_size;
+	capability->aead.iv_size.max = acap->max_iv_size;
+	capability->aead.iv_size.increment = acap->inc_iv_size;
+
+	capability->aead.digest_size.min = acap->min_digest_size;
+	capability->aead.digest_size.max = acap->max_digest_size;
+	capability->aead.digest_size.increment = acap->inc_digest_size;
+}
+
+
+/**
+ * Dynamically set crypto capabilities based on the virtchnl IPsec
+ * capabilities structure.
+ */
+int
+iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
+{
+	struct rte_cryptodev_capabilities *capabilities;
+	int i, j, number_of_capabilities = 0, ci = 0;
+
+	/* Count the total number of crypto algorithms supported */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
+		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
+
+	/**
+	 * Allocate cryptodev capabilities structure for
+	 * *number_of_capabilities* items plus one item to null terminate the
+	 * array
+	 */
+	capabilities = rte_zmalloc("crypto_cap",
+		sizeof(struct rte_cryptodev_capabilities) *
+		(number_of_capabilities + 1), 0);
+	if (capabilities == NULL)
+		return -ENOMEM;
+
+	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
+
+	/**
+	 * Iterate over each virtchnl crypto capability by crypto type and
+	 * algorithm.
+	 */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
+		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
+			switch (vch_cap->cap[i].crypto_type) {
+			case VIRTCHNL_AUTH:
+				update_auth_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_CIPHER:
+				update_cipher_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_AEAD:
+				update_aead_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			default:
+				capabilities[ci].op =
+						RTE_CRYPTO_OP_TYPE_UNDEFINED;
+				break;
+			}
+		}
+	}
+
+	iavf_sctx->crypto_capabilities = capabilities;
+	return 0;
+}
+
+/**
+ * Get security capabilities for device
+ */
+static const struct rte_security_capability *
+iavf_ipsec_crypto_capabilities_get(void *device)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	unsigned int i;
+
+	static struct rte_security_capability iavf_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .tso = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .tso = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .tso = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .tso = 1,
+						.stats = 1, .esn = 1 }
+			},
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	/**
+	 * Update the security capabilities struct with the runtime discovered
+	 * crypto capabilities, except for the last element of the array, which
+	 * is the null termination.
+	 */
+	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
+			sizeof(iavf_security_capabilities[0])) - 1); i++) {
+		iavf_security_capabilities[i].crypto_capabilities =
+			iavf_sctx->crypto_capabilities;
+	}
+
+	return iavf_security_capabilities;
+}
+
+static struct rte_security_ops iavf_ipsec_crypto_ops = {
+	.session_get_size		= iavf_ipsec_crypto_session_size_get,
+	.session_create			= iavf_ipsec_crypto_session_create,
+	.session_update			= iavf_ipsec_crypto_session_update,
+	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
+	.session_destroy		= iavf_ipsec_crypto_session_destroy,
+	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
+	.get_userdata			= NULL,
+	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
+};
+
+int
+iavf_security_ctx_create(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx;
+
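+	/* allocate the generic security context exposed through the ethdev */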
+	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
+	if (sctx == NULL)
+		return -ENOMEM;
+
+	sctx->device = adapter->eth_dev;
+	sctx->ops = &iavf_ipsec_crypto_ops;
+	sctx->sess_cnt = 0;
+
+	adapter->eth_dev->security_ctx = sctx;
+
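+	/* allocate the driver private security context on first use */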
+	if (adapter->security_ctx == NULL) {
+		adapter->security_ctx = rte_malloc("iavf_security_ctx",
+				sizeof(struct iavf_security_ctx), 0);
+		if (adapter->security_ctx == NULL)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int
+iavf_security_init(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct rte_mbuf_dynfield pkt_md_dynfield = {
+		.name = "iavf_ipsec_crypto_pkt_metadata",
+		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
+		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
+	};
+	struct virtchnl_ipsec_cap capabilities;
+	int rc;
+
+	iavf_sctx->adapter = adapter;
+
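+	/* register mbuf dynamic field carrying per-packet IPsec Tx metadata */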
+	iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
+	if (iavf_sctx->pkt_md_offset < 0)
+		return iavf_sctx->pkt_md_offset;
+
+	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
+	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
+	if (rc)
+		return rc;
+
+	return	iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
+			&capabilities);
+}
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	return iavf_sctx->pkt_md_offset;
+}
+
+int
+iavf_security_ctx_destroy(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx  = adapter->eth_dev->security_ctx;
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	if (iavf_sctx == NULL)
+		return -ENODEV;
+
+	/* TODO: Add resources cleanup */
+
+	/* free and reset security data structures */
+	rte_free(iavf_sctx);
+	rte_free(sctx);
+
+	iavf_sctx = NULL;
+	sctx = NULL;
+
+	return 0;
+}
+
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
+{
+	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
+
+	/** Capability check for IPsec Crypto */
+	if (resources && (resources->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
+		return 1;
+
+	return 0;
+}
+
+
+#define IAVF_IPSEC_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
+enum iavf_ipsec_flow_pt_type {
+	IAVF_PATTERN_ESP = 1,
+	IAVF_PATTERN_AH,
+	IAVF_PATTERN_UDP_ESP,
+};
+enum iavf_ipsec_flow_pt_ip_ver {
+	IAVF_PATTERN_IPV4 = 1,
+	IAVF_PATTERN_IPV6,
+};
+
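+/* Pattern type is encoded in the low nibble, IP version in the high nibble */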
+#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
+#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
+#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
+
+static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
+	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
+};
+
+struct iavf_ipsec_flow_item {
+	uint64_t id;
+	uint8_t is_ipv4;
+	uint32_t spi;
+	struct rte_ether_hdr eth_hdr;
+	union {
+		struct rte_ipv4_hdr ipv4_hdr;
+		struct rte_ipv6_hdr ipv6_hdr;
+	};
+	struct rte_udp_hdr udp_hdr;
+};
+
+static void
+parse_eth_item(const struct rte_flow_item_eth *item,
+		struct rte_ether_hdr *eth)
+{
+	memcpy(eth->s_addr.addr_bytes,
+			item->src.addr_bytes, sizeof(eth->s_addr));
+	memcpy(eth->d_addr.addr_bytes,
+			item->dst.addr_bytes, sizeof(eth->d_addr));
+}
+
+static void
+parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
+		struct rte_ipv4_hdr *ipv4)
+{
+	ipv4->src_addr = item->hdr.src_addr;
+	ipv4->dst_addr = item->hdr.dst_addr;
+}
+
+static void
+parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
+		struct rte_ipv6_hdr *ipv6)
+{
+	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
+	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+}
+
+static void
+parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
+{
+	udp->dst_port = item->hdr.dst_port;
+	udp->src_port = item->hdr.src_port;
+}
+
+static int
+has_security_action(const struct rte_flow_action actions[],
+	const struct rte_flow_action_security **action)
+{
+	int i;
+	for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
+		if (actions[i].type == RTE_FLOW_ACTION_TYPE_SECURITY)	{
+			if (action != NULL)
+				*action = actions[i].conf;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+
+static struct iavf_ipsec_flow_item *
+iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t type)
+{
+	const struct rte_flow_action_security *action;
+	struct iavf_ipsec_flow_item
+		*ipsec_flow = rte_malloc("security-flow-rule",
+		sizeof(struct iavf_ipsec_flow_item), 0);
+	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
+	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
+
+	if (ipsec_flow == NULL)
+		return NULL;
+
+	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
+
+	if (pattern[0].spec)
+		parse_eth_item((const struct rte_flow_item_eth *)
+				pattern[0].spec, &ipsec_flow->eth_hdr);
+
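+	/* pattern order is fixed: eth/ip/esp, eth/ip/ah or eth/ip/udp/esp */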
+	switch (p_type) {
+	case IAVF_PATTERN_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[2].spec)->hdr.spi;
+		break;
+	case IAVF_PATTERN_AH:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_ah *)
+					pattern[2].spec)->spi;
+		break;
+	case IAVF_PATTERN_UDP_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		parse_udp_item((const struct rte_flow_item_udp *)
+				pattern[2].spec,
+			&ipsec_flow->udp_hdr);
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[3].spec)->hdr.spi;
+		break;
+	default:
+		goto flow_cleanup;
+	}
+
+
+	if (!has_security_action(actions, &action))
+		goto flow_cleanup;
+
+	if (!iavf_ipsec_crypto_action_valid(ethdev, action->security_session,
+			ipsec_flow->spi))
+		goto flow_cleanup;
+
+	return ipsec_flow;
+
+flow_cleanup:
+	rte_free(ipsec_flow);
+	return NULL;
+}
+
+
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser;
+
+static int
+iavf_ipsec_flow_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
+		parser = &iavf_ipsec_flow_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
+}
+
+static int
+iavf_ipsec_flow_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = meta;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	if (ipsec_flow->is_ipv4) {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			1,
+			ipsec_flow->ipv4_hdr.dst_addr,
+			NULL,
+			0);
+	} else {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			0,
+			0,
+			ipsec_flow->ipv6_hdr.dst_addr,
+			0);
+	}
+
+	if (ipsec_flow->id < 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Failed to add SA.");
+		return -rte_errno;
+	}
+
+	flow->rule = ipsec_flow;
+
+	return 0;
+}
+
+static int
+iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	iavf_ipsec_crypto_security_policy_delete(ad,
+			ipsec_flow->is_ipv4, ipsec_flow->id);
+	rte_free(ipsec_flow);
+	return 0;
+}
+
+static struct iavf_flow_engine iavf_ipsec_flow_engine = {
+	.init = iavf_ipsec_flow_init,
+	.uninit = iavf_ipsec_flow_uninit,
+	.create = iavf_ipsec_flow_create,
+	.destroy = iavf_ipsec_flow_destroy,
+	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
+};
+
+static int
+iavf_ipsec_flow_parse(struct iavf_adapter *ad,
+		       struct iavf_pattern_match_item *array,
+		       uint32_t array_len,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta,
+		       struct rte_flow_error *error)
+{
+	struct iavf_pattern_match_item *item = NULL;
+	int ret = 0;
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (item && item->meta) {
+		uint32_t type = (uint64_t)(item->meta);
+		struct iavf_ipsec_flow_item *fi =
+				iavf_ipsec_flow_item_parse(ad->eth_dev,
+						pattern, actions, type);
+		if (fi && meta)
+			*meta = fi;
+	} else {
+		ret = -rte_errno;
+	}
+
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser = {
+	.engine = &iavf_ipsec_flow_engine,
+	.array = iavf_ipsec_flow_pattern,
+	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
+	.parse_pattern_action = iavf_ipsec_flow_parse,
+	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
+};
+
+RTE_INIT(iavf_ipsec_flow_engine_register)
+{
+	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
+}
+
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
new file mode 100644
index 0000000000..d8d7d6649e
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_H_
+#define _IAVF_IPSEC_CRYPTO_H_
+
+#include <rte_security.h>
+
+#include "iavf.h"
+
+/* IPsec Crypto Packet Metadata offload flags */
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
+
+/**
+ * Packet metadata structure used to hold the parameters required by the iAVF
+ * transmit data path. The parameters are set per packet through the
+ * rte_security_set_pkt_metadata() API.
+ */
+struct iavf_ipsec_crypto_pkt_metadata {
+	uint32_t sa_idx;                /* SA hardware index (20b/4B) */
+
+	uint8_t ol_flags;		/* flags (1B) */
+	uint8_t len_iv;			/* IV length (2b/1B) */
+	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
+	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
+
+	uint16_t l4_payload_len;	/* L4 payload length */
+	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extension headers len (5b/1B) */
+	uint8_t next_proto;		/* Next Protocol (8b/1B) */
+
+	uint32_t esn;		        /* Extended Sequence Number (32b/4B) */
+} __rte_packed;
+
+/**
+ * Check whether inline IPsec Crypto offload is supported
+ */
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
+
+/**
+ * Create security context
+ */
+int iavf_security_ctx_create(struct iavf_adapter *adapter);
+
+/**
+ * Initialize security context
+ */
+int iavf_security_init(struct iavf_adapter *adapter);
+
+/**
+ * Set security capabilities
+ */
+int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchl_capabilities);
+
+
+int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+/**
+ * Destroy security context
+ */
+int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
+
+/**
+ * Verify that the inline IPsec Crypto action is valid for this device
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi);
+
+/**
+ * Add inbound security policy rule to hardware
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop);
+
+/**
+ * Delete inbound security policy rule from hardware
+ */
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id);
+
+#endif /* _IAVF_IPSEC_CRYPTO_H_ */
diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
new file mode 100644
index 0000000000..70ce8dd638
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC MAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 },
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* ChaCha20-Poly1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 16,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	}
+};
+
+
+#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index e33fe4576b..8b85213acb 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -27,6 +27,7 @@
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
+#include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
@@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 uint8_t
 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
@@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
 		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
 		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
 		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
+				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
 	};
 
 	return flex_type < RTE_DIM(rxdid_map) ?
@@ -464,6 +468,44 @@ iavf_rxd_to_pkt_fields_by_comms_aux_v2(struct iavf_rx_queue *rxq,
 #endif
 }
 
+static inline void
+iavf_rxd_to_pkt_fields_by_comms_ipsec_crypto(struct iavf_rx_queue *rxq,
+				       struct rte_mbuf *mb,
+				       volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms *desc =
+			(volatile struct iavf_32b_rx_flex_desc_comms *)rxdp;
+	uint16_t stat_err;
+
+	stat_err = rte_le_to_cpu_16(desc->status_error0);
+	if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+		mb->ol_flags |= PKT_RX_RSS_HASH;
+		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+	}
+
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+	if (desc->flow_id != 0xFFFFFFFF) {
+		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+	}
+
+	if (rxq->xtr_ol_flag) {
+		uint32_t metadata = 0;
+
+		if (desc->flex_ts.flex.aux0 != 0xFFFF)
+			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+		else if (desc->flex_ts.flex.aux1 != 0xFFFF)
+			metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
+
+		if (metadata) {
+			mb->ol_flags |= rxq->xtr_ol_flag;
+
+			*RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(mb) = metadata;
+		}
+	}
+#endif
+}
+
 static void
 iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 {
@@ -500,6 +542,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields =
 			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
+	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+		rxq->xtr_ol_flag =
+			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+		rxq->rxd_to_pkt_fields =
+			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
 	case IAVF_RXDID_COMMS_OVS_1:
 		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
 		break;
@@ -684,6 +732,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		       const struct rte_eth_txconf *tx_conf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
@@ -728,9 +778,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		struct virtchnl_vlan_supported_caps *insertion_support =
-			&vf->vlan_v2_caps.offloads.insertion_support;
+			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
 		uint32_t insertion_cap;
 
 		if (insertion_support->outer)
@@ -754,6 +804,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
+	if (iavf_ipsec_crypto_supported(adapter))
+		txq->ipsec_crypto_pkt_md_offset =
+			iavf_security_get_pkt_md_offset(adapter);
+
 	/* Allocate software ring */
 	txq->sw_ring =
 		rte_zmalloc_socket("iavf tx sw ring",
@@ -1044,29 +1098,97 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 
 static inline void
 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
-			  volatile union iavf_rx_flex_desc *rxdp,
-			  uint8_t rx_flags)
+			  volatile union iavf_rx_flex_desc *rxdp)
 {
-	uint16_t vlan_tci = 0;
-
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
-	    rte_le_to_cpu_64(rxdp->wb.status_error0) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+		(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+		mb->vlan_tci =
+			rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	} else {
+		mb->vlan_tci = 0;
+	}
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
-	    rte_le_to_cpu_16(rxdp->wb.status_error1) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
+		mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+				PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+		mb->vlan_tci_outer = mb->vlan_tci;
+		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
+	} else {
+		mb->vlan_tci_outer = 0;
+	}
 #endif
+}
 
-	if (vlan_tci) {
-		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		mb->vlan_tci = vlan_tci;
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
+
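+	/* expose the hardware SA index (SAID) via the mbuf dynamic field */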
+	mb->dynfield1[0] = desc->ipsec_said &
+			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
 	}
+
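+/* Translate IPsec inline crypto status from the Rx descriptor into mbuf
+ * offload flags and update the queue statistics.
+ */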
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp,
+			  struct iavf_ipsec_crypto_stats *stats)
+{
+	uint16_t status1 = rte_le_to_cpu_64(rxdp->wb.status_error1);
+
+	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
+		uint16_t ipsec_status;
+
+		mb->ol_flags |= PKT_RX_SEC_OFFLOAD;
+
+		ipsec_status = status1 &
+			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
+
+
+		if (unlikely(ipsec_status !=
+			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
+			mb->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+
+			switch (ipsec_status) {
+			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
+				stats->ierrors.sad_miss++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
+				stats->ierrors.not_processed++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
+				stats->ierrors.icv_check++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
+				stats->ierrors.ipsec_length++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
+				stats->ierrors.misc++;
+				break;
 }
 
+			stats->ierrors.count++;
+			return;
+		}
+
+		stats->icount++;
+		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
+
+		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
+			ipsec_status !=
+				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
+			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
+	}
+}
+
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
@@ -1186,7 +1308,7 @@ iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
 			   rxq->port_id, rxq->queue_id, rx_id, nb_hold);
 		rx_id = (uint16_t)((rx_id == 0) ?
 			(rxq->nb_rx_desc - 1) : (rx_id - 1));
-		IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
+		IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
 		nb_hold = 0;
 	}
 	rxq->nb_rx_hold = nb_hold;
@@ -1384,8 +1506,13 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->ol_flags = 0;
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+
+		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+				&rxq->stats.ipsec_crypto);
+
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
+
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
 
@@ -1526,7 +1653,10 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = 0;
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+				&rxq->stats.ipsec_crypto);
+
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1764,7 +1894,10 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
-			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
+			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+				&rxq->stats.ipsec_crypto);
+
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -1935,7 +2068,7 @@ iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
 
 	/* Update rx tail register */
 	rte_wmb();
-	IAVF_PCI_REG_WC_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
+	IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
 
 	rxq->rx_free_trigger =
 		(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
@@ -2034,7 +2167,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
 
 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-	if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
+	if ((txd[desc_to_clean_to].qw1 &
 			rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
 			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
 		PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
@@ -2050,7 +2183,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
 					last_desc_cleaned);
 
-	txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
+	txd[desc_to_clean_to].qw1 = 0;
 
 	txq->last_desc_cleaned = desc_to_clean_to;
 	txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
@@ -2059,189 +2192,393 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 }
 
 /* Check if the context descriptor is needed for TX offloading */
+static inline uint8_t
+iavf_ctx_desc_required(const struct rte_mbuf *m)
+{
+	return m->ol_flags &
+		(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK) ? 1 : 0;
+}
+
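+/* Number of data descriptors needed for a packet: one per mbuf segment */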
+static inline uint8_t
+iavf_data_desc_required(const struct rte_mbuf *m)
+{
+	return m->nb_segs;
+}
+
+/* Check if the ipsec descriptor is needed for TX offloading */
+static inline uint8_t
+iavf_ipsec_desc_required(const struct rte_mbuf *m)
+{
+	const uint64_t mask = PKT_TX_SEC_OFFLOAD;
+
+	return m->ol_flags & mask ? 1 : 0;
+}
+
+static inline void
+iavf_fill_desc_type_field(volatile uint64_t *field, uint64_t value)
+{
+	*field = value;
+}
+
+static inline void
+iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
+{
+	uint64_t cmd = 0;
+
+	/* TSO enabled */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;
+
+	/* Time Sync - Currently not supported */
+
+	/* Outer L2 TAG 2 Insertion - Currently not supported */
+	/* Inner L2 TAG 2 Insertion - Currently not supported */
+
+	*field |= cmd;
+}
+
+static inline void
+iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+{
+	uint64_t ipsec_field =
+		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+
+	*field |= ipsec_field;
+}
+
+
+static inline void
+iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
+		const struct rte_mbuf *m)
+{
+	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
+	uint64_t eip_len = 0;
+	uint64_t eip_noinc = 0;
+	/* Default - IP_ID is increment in each segment of LSO */
+
+	switch (m->ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6 |
+			PKT_TX_OUTER_IP_CKSUM)) {
+	case PKT_TX_OUTER_IPV4:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV6:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	}
+
+	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
+		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
+		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
+}
+
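+/* Program the TSO total length and MSS fields of the context descriptor.
+ * For security offload the payload length comes from the IPsec metadata,
+ * otherwise it is the packet length minus the L2/L3/L4 (and outer L3)
+ * header lengths. Returns the payload length written to the descriptor.
+ */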
 static inline uint16_t
-iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
+iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
-	if (flags & PKT_TX_TCP_SEG)
-		return 1;
-	if (flags & PKT_TX_VLAN_PKT &&
-	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		return 1;
-	return 0;
+	uint64_t segmentation_field = 0;
+	uint64_t total_length = 0;
+
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD) {
+		total_length = ipsec_md->l4_payload_len;
+	} else {
+		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			total_length -= m->outer_l3_len;
+	}
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+	if (!m->l4_len || !m->tso_segsz)
+		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
+			 m->l4_len, m->tso_segsz);
+	if (m->tso_segsz < IAVF_MIN_TSO_MSS)
+		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
+			m->tso_segsz, IAVF_MIN_TSO_MSS);
+#endif
+	segmentation_field =
+		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
+				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
+		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
+				IAVF_TXD_CTX_QW1_MSS_MASK);
+
+	*field |= segmentation_field;
+
+	return total_length;
+}
+
+static inline void
+iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
+	uint16_t *tlen)
+{
+	/* fill descriptor type field */
+	iavf_fill_desc_type_field(&desc->qw1, IAVF_TX_DESC_DTYPE_CONTEXT);
+
+	/* fill command field */
+	iavf_fill_ctx_desc_cmd_field(&desc->qw1, m);
+
+	/* fill segmentation field */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		/* fill IPsec field */
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			iavf_fill_ctx_desc_ipsec_field(&desc->qw1, ipsec_md);
+
+		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc->qw1,
+				m, ipsec_md);
+	}
+
+	/* fill tunnelling field */
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field(&desc->qw0, m);
+	else
+		desc->qw0 = 0;
+
+	desc->qw0 = rte_cpu_to_le_64(desc->qw0);
+	desc->qw1 = rte_cpu_to_le_64(desc->qw1);
+}
+
+
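+/* Fill the Tx IPsec descriptor from the per-packet crypto metadata:
+ * L4 payload length, ESN and ESP trailer length in QW0; SA index, next
+ * protocol, IV length and NAT-T/UDP encapsulation flag in QW1.
+ */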
+static inline void
+iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
+	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
+{
+	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
+		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
+		((uint64_t)md->esp_trailer_len <<
+				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
+
+	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
+		((uint64_t)md->next_proto <<
+				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
+		((uint64_t)(md->len_iv & 0x3) <<
+				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
+		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+				1ULL : 0ULL) <<
+				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
+		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+
+	/**
+	 * TODO: pre-calculate this during session initialization.
+	 *
+	 * Calculate the IPsec header length (ESP header, IV and optional
+	 * UDP encapsulation) required by the data descriptor when TSO
+	 * offload is enabled.
+	 */
+	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
+			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+			sizeof(struct rte_udp_hdr) : 0);
 }
 
 static inline void
-iavf_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union iavf_tx_offload tx_offload)
+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
+		struct rte_mbuf *m)
 {
+	uint64_t command = 0;
+	uint64_t offset = 0;
+	uint64_t l2tag1 = 0;
+
+	iavf_fill_desc_type_field(qw1, IAVF_TX_DESC_DTYPE_DATA);
+
+	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
+
+	/* Descriptor based VLAN insertion */
+	if (m->ol_flags & PKT_TX_VLAN_PKT) {
+		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
+		l2tag1 |= m->vlan_tci;
+	}
+
 	/* Set MACLEN */
-	*td_offset |= (tx_offload.l2_len >> 1) <<
-		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & PKT_TX_IP_CKSUM) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV4) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV6) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	}
-
-	if (ol_flags & PKT_TX_TCP_SEG) {
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		return;
+	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L3 checksum offloading inner */
+	if (m->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV4) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV6) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
 	}
 
 	/* Enable L4 checksum offloads */
-	switch (ol_flags & PKT_TX_L4_MASK) {
+	switch (m->ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_TCP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_SCTP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_UDP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	}
+
+	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
+		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
+		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
+		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
+		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
-/* set TSO context descriptor
- * support IP -> L4 and IP -> IP -> L4
- */
-static inline uint64_t
-iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
+static inline void
+iavf_fill_data_desc_from_template(volatile uint64_t *field,  uint64_t value)
 {
-	uint64_t ctx_desc = 0;
-	uint32_t cd_cmd, hdr_len, cd_tso_len;
+	*field = value;
+}
 
-	if (!tx_offload.l4_len) {
-		PMD_TX_LOG(DEBUG, "L4 length set to 0");
-		return ctx_desc;
+static inline void
+iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t value)
+{
+	*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 	}
 
-	hdr_len = tx_offload.l2_len +
-		  tx_offload.l3_len +
-		  tx_offload.l4_len;
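+/* Fill a Tx data descriptor from the pre-built QW1 template and program
+ * the buffer address and size. For TSO/USO packets the size is the
+ * accumulated header length (L2/L3, optional L4, outer L3 and IPsec
+ * headers) plus the total payload length computed for the context
+ * descriptor; otherwise the mbuf segment data length is used.
+ */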
+static inline void
+iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
+	struct rte_mbuf *m, uint64_t desc_template,
+	uint16_t tlen, uint16_t ipseclen)
+{
+	uint32_t hdrlen = m->l2_len;
+	uint32_t bufsz = 0;
+
+	/* fill data descriptor qw1 from template */
+	iavf_fill_data_desc_from_template(&desc->qw1, desc_template);
+
+	/* set data buffer address */
+	desc->qw0 = rte_mbuf_data_iova(m);
+
+	/* calculate data buffer size less set header lengths */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			hdrlen += m->outer_l3_len;
+
+		if (m->ol_flags & PKT_TX_L4_MASK)
+			hdrlen += m->l3_len + m->l4_len;
+		else
+			hdrlen += m->l3_len;
+
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			hdrlen += ipseclen;
+
+		bufsz = hdrlen + tlen;
+	} else {
+		bufsz = m->data_len;
+	}
 
-	cd_cmd = IAVF_TX_CTX_DESC_TSO;
-	cd_tso_len = mbuf->pkt_len - hdr_len;
-	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
-		     ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-		     ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+	/* set data buffer size */
+	iavf_fill_data_desc_buffer_sz_field(&desc->qw1, bufsz);
 
-	return ctx_desc;
+	desc->qw0 = rte_cpu_to_le_64(desc->qw0);
+	desc->qw1 = rte_cpu_to_le_64(desc->qw1);
 }
 
-/* Construct the tx flags */
-static inline uint64_t
-iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
-	       uint32_t td_tag)
+
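+/* Return a pointer to the IPsec crypto Tx metadata stored in the mbuf
+ * dynamic field at the offset registered for this Tx queue, or NULL when
+ * the packet does not request security offload.
+ */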
+static struct iavf_ipsec_crypto_pkt_metadata *
+iavf_ipsec_crypto_get_pkt_metdata(const struct iavf_tx_queue *txq,
+		struct rte_mbuf *m)
 {
-	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
-				((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
-				((uint64_t)td_offset <<
-				 IAVF_TXD_QW1_OFFSET_SHIFT) |
-				((uint64_t)size  <<
-				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
-				((uint64_t)td_tag  <<
-				 IAVF_TXD_QW1_L2TAG1_SHIFT));
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
+				struct iavf_ipsec_crypto_pkt_metadata *);
+
+	return NULL;
 }
 
 /* TX function */
 uint16_t
-iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+iavf_xmit_pkts(void *tx_queue, struct rte_mbuf *mbufs[], uint16_t nb_mbufs)
 {
-	volatile struct iavf_tx_desc *txd;
-	volatile struct iavf_tx_desc *txr;
-	struct iavf_tx_queue *txq;
-	struct iavf_tx_entry *sw_ring;
-	struct iavf_tx_entry *txe, *txn;
-	struct rte_mbuf *tx_pkt;
-	struct rte_mbuf *m_seg;
-	uint16_t tx_id;
-	uint16_t nb_tx;
-	uint32_t td_cmd;
-	uint32_t td_offset;
-	uint32_t td_tag;
-	uint64_t ol_flags;
-	uint16_t nb_used;
-	uint16_t nb_ctx;
-	uint16_t tx_last;
-	uint16_t slen;
-	uint64_t buf_dma_addr;
-	uint16_t cd_l2tag2 = 0;
-	union iavf_tx_offload tx_offload = {0};
-
-	txq = tx_queue;
-	sw_ring = txq->sw_ring;
-	txr = txq->tx_ring;
-	tx_id = txq->tx_tail;
-	txe = &sw_ring[tx_id];
+	struct iavf_tx_queue *txq = tx_queue;
+	volatile struct iavf_tx_desc *desc_ring = txq->tx_ring;
+	struct iavf_tx_entry *txe_ring = txq->sw_ring;
+	struct iavf_tx_entry *txe_current, *txe_next;
+	struct rte_mbuf *mb, *mb_seg;
+	uint16_t desc_idx, desc_idx_last;
+	uint16_t idx;
+
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_free < txq->free_thresh)
-		(void)iavf_xmit_cleanup(txq);
+		iavf_xmit_cleanup(txq);
+
+	desc_idx = txq->tx_tail;
+	txe_current = &txe_ring[desc_idx];
 
-	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-		td_cmd = 0;
-		td_tag = 0;
-		td_offset = 0;
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+	iavf_dump_tx_entry_ring(txq);
+	iavf_dump_tx_desc_ring(txq);
+#endif
 
-		tx_pkt = *tx_pkts++;
-		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
-		ol_flags = tx_pkt->ol_flags;
-		tx_offload.l2_len = tx_pkt->l2_len;
-		tx_offload.l3_len = tx_pkt->l3_len;
-		tx_offload.l4_len = tx_pkt->l4_len;
-		tx_offload.tso_segsz = tx_pkt->tso_segsz;
-		/* Calculate the number of context descriptors needed. */
-		nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
+	for (idx = 0; idx < nb_mbufs; idx++) {
+		volatile struct iavf_tx_desc *ddesc;
+		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
 
-		/* The number of descriptors that must be allocated for
+		uint16_t nb_desc_ctx, nb_desc_ipsec;
+		uint16_t nb_desc_data, nb_desc_required;
+		uint16_t tlen = 0, ipseclen = 0;
+		uint64_t ddesc_template = 0;
+		uint64_t ddesc_cmd = 0;
+
+		mb = mbufs[idx];
+
+		RTE_MBUF_PREFETCH_TO_FREE(txe_current->mbuf);
+
+		/**
+		 * Get metadata for ipsec crypto from mbuf dynamic fields if
+		 * security offload is specified.
+		 */
+		ipsec_md = iavf_ipsec_crypto_get_pkt_metdata(txq, mb);
+
+		nb_desc_data = iavf_data_desc_required(mb);
+		nb_desc_ctx = iavf_ctx_desc_required(mb);
+		nb_desc_ipsec = iavf_ipsec_desc_required(mb);
+
+		/**
+		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
-		 * packet plus 1 context descriptor if needed.
+		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
-		tx_last = (uint16_t)(tx_id + nb_used - 1);
+		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
+
+		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
-		/* Circular ring */
-		if (tx_last >= txq->nb_tx_desc)
-			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+		/* wrap descriptor ring */
+		if (desc_idx_last >= txq->nb_tx_desc)
+			desc_idx_last =
+				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
 
-		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
-			   " tx_first=%u tx_last=%u",
-			   txq->port_id, txq->queue_id, tx_id, tx_last);
+		PMD_TX_LOG(DEBUG,
+			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
+			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
 
-		if (nb_used > txq->nb_free) {
+		if (nb_desc_required > txq->nb_free) {
 			if (iavf_xmit_cleanup(txq)) {
-				if (nb_tx == 0)
+				if (idx == 0)
 					return 0;
 				goto end_of_tx;
 			}
-			if (unlikely(nb_used > txq->rs_thresh)) {
-				while (nb_used > txq->nb_free) {
+			if (unlikely(nb_desc_required > txq->rs_thresh)) {
+				while (nb_desc_required > txq->nb_free) {
 					if (iavf_xmit_cleanup(txq)) {
-						if (nb_tx == 0)
+						if (idx == 0)
 							return 0;
 						goto end_of_tx;
 					}
@@ -2249,122 +2586,114 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 		}
 
-		/* Descriptor based VLAN insertion */
-		if (ol_flags & PKT_TX_VLAN_PKT &&
-		    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
-			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
-			td_tag = tx_pkt->vlan_tci;
-		}
-
-		/* According to datasheet, the bit2 is reserved and must be
-		 * set to 1.
-		 */
-		td_cmd |= 0x04;
+		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
 
-		/* Enable checksum offloading */
-		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
-			iavf_txd_enable_checksum(ol_flags, &td_cmd,
-						&td_offset, tx_offload);
-
-		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
-			uint64_t cd_type_cmd_tso_mss =
-				IAVF_TX_DESC_DTYPE_CONTEXT;
-			volatile struct iavf_tx_context_desc *ctx_txd =
+		if (nb_desc_ctx) {
+			volatile struct iavf_tx_context_desc *ctx_desc =
 				(volatile struct iavf_tx_context_desc *)
-							&txr[tx_id];
+					&desc_ring[desc_idx];
 
 			/* clear QW0 or the previous writeback value
 			 * may impact next write
 			 */
-			*(volatile uint64_t *)ctx_txd = 0;
+			*(volatile uint64_t *)ctx_desc = 0;
+
+			txe_next = &txe_ring[txe_current->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txe_next->mbuf);
 
-			txn = &sw_ring[txe->next_id];
-			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
-			if (txe->mbuf) {
-				rte_pktmbuf_free_seg(txe->mbuf);
-				txe->mbuf = NULL;
+			if (txe_current->mbuf) {
+				rte_pktmbuf_free_seg(txe_current->mbuf);
+				txe_current->mbuf = NULL;
 			}
 
-			/* TSO enabled */
-			if (ol_flags & PKT_TX_TCP_SEG)
-				cd_type_cmd_tso_mss |=
-					iavf_set_tso_ctx(tx_pkt, tx_offload);
+			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
+			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
-			if (ol_flags & PKT_TX_VLAN_PKT &&
-			   txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
-				cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
-					<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-				cd_l2tag2 = tx_pkt->vlan_tci;
+			txe_current->last_id = desc_idx_last;
+			desc_idx = txe_current->next_id;
+			txe_current = txe_next;
 			}
 
-			ctx_txd->type_cmd_tso_mss =
-				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
-			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
+		if (nb_desc_ipsec) {
+			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
+				(volatile struct iavf_tx_ipsec_desc *)
+					&desc_ring[desc_idx];
+
+			txe_next = &txe_ring[txe_current->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txe_next->mbuf);
 
-			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
+			if (txe_current->mbuf) {
+				rte_pktmbuf_free_seg(txe_current->mbuf);
+				txe_current->mbuf = NULL;
 		}
 
-		m_seg = tx_pkt;
+			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
+
+			txe_current->last_id = desc_idx_last;
+			desc_idx = txe_current->next_id;
+			txe_current = txe_next;
+		}
+
+		mb_seg = mb;
+
 		do {
-			txd = &txr[tx_id];
-			txn = &sw_ring[txe->next_id];
-
-			if (txe->mbuf)
-				rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = m_seg;
-
-			/* Setup TX Descriptor */
-			slen = m_seg->data_len;
-			buf_dma_addr = rte_mbuf_data_iova(m_seg);
-			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
-			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
-								  td_offset,
-								  slen,
-								  td_tag);
-
-			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
-			m_seg = m_seg->next;
-		} while (m_seg);
+			ddesc = (volatile struct iavf_tx_desc *)
+					&desc_ring[desc_idx];
+
+			txe_next = &txe_ring[txe_current->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txe_next->mbuf);
+
+			if (txe_current->mbuf)
+				rte_pktmbuf_free_seg(txe_current->mbuf);
+
+			txe_current->mbuf = mb_seg;
+			iavf_fill_data_desc(ddesc, mb_seg,
+					ddesc_template, tlen, ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+			txe_current->last_id = desc_idx_last;
+			desc_idx = txe_current->next_id;
+			txe_current = txe_next;
+			mb_seg = mb_seg->next;
+		} while (mb_seg);
 
 		/* The last packet data descriptor needs End Of Packet (EOP) */
-		td_cmd |= IAVF_TX_DESC_CMD_EOP;
-		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
-		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
+
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
 
 		if (txq->nb_used >= txq->rs_thresh) {
 			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
 				   "%4u (port=%d queue=%d)",
-				   tx_last, txq->port_id, txq->queue_id);
+				   desc_idx_last, txq->port_id, txq->queue_id);
 
-			td_cmd |= IAVF_TX_DESC_CMD_RS;
+			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
 
 			/* Update txq RS bit counters */
 			txq->nb_used = 0;
 		}
 
-		txd->cmd_type_offset_bsz |=
-			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
-					 IAVF_TXD_QW1_CMD_SHIFT);
-		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+		ddesc->qw1 |= rte_cpu_to_le_64(ddesc_cmd <<
+				IAVF_TXD_DATA_QW1_CMD_SHIFT);
+
+		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
 	}
 
 end_of_tx:
 	rte_wmb();
 
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   txq->port_id, txq->queue_id, tx_id, nb_tx);
+		   txq->port_id, txq->queue_id, desc_idx, idx);
 
-	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
-	txq->tx_tail = tx_id;
+	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
+	txq->tx_tail = desc_idx;
 
-	return nb_tx;
+	return idx;
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
@@ -2865,7 +3194,7 @@ iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
 			desc -= txq->nb_tx_desc;
 	}
 
-	status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+	status = &txq->tx_ring[desc].qw1;
 	mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
 	expect = rte_cpu_to_le_64(
 		 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index e210b913d6..9852a89194 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -25,7 +25,8 @@
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		DEV_TX_OFFLOAD_TCP_TSO |		 \
+		DEV_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
 		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
@@ -47,7 +48,7 @@
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          88
+#define IAVF_MIN_TSO_MSS          256
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
@@ -65,7 +66,8 @@
 		PKT_TX_VLAN_PKT |		 \
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
-		PKT_TX_TCP_SEG)
+		PKT_TX_TCP_SEG |		 \
+		PKT_TX_SEC_OFFLOAD)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
@@ -163,6 +165,24 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_rx_queue_stats {
+	uint64_t reserved;
+	struct iavf_ipsec_crypto_stats ipsec_crypto;
+};
+
 /* Structure associated with each Rx queue. */
 struct iavf_rx_queue {
 	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
@@ -211,6 +231,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 				/* handle flexible descriptor by RXDID */
+	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
 };
 
@@ -245,6 +266,7 @@ struct iavf_tx_queue {
 	uint64_t offloads;
 	uint16_t next_dd;              /* next to set RS, for VPMD */
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
+	uint16_t ipsec_crypto_pkt_md_offset;
 
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
@@ -255,6 +277,52 @@ struct iavf_tx_queue {
 	uint8_t tc;
 };
 
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+
+static void iavf_dump_tx_entry(uint16_t txe_id, const struct iavf_tx_entry *txe)
+{
+	printf("txe %3d : next %3d, last %3d, mbuf 0x%p\n",
+		txe_id, txe->next_id, txe->last_id, txe->mbuf);
+}
+
+static void iavf_dump_tx_entry_ring(const struct iavf_tx_queue *txq)
+{
+	uint16_t i;
+
+	printf("port %d, queue %d :\n\n", txq->port_id, txq->queue_id);
+
+	printf("nb descriptors %d\n", txq->nb_tx_desc);
+	printf("tail %d\n", txq->tx_tail);
+	printf("nb used %d, nb free %d\n", txq->nb_used, txq->nb_free);
+	printf("last cleaned %d\n", txq->last_desc_cleaned);
+	printf("free threshold %d\n", txq->free_thresh);
+	printf("rs threshold %d\n\n", txq->rs_thresh);
+
+
+	for (i = 0; i < txq->nb_tx_desc; i++)
+		iavf_dump_tx_entry(i, &txq->sw_ring[i]);
+}
+
+static void iavf_dump_tx_desc_ring(const struct iavf_tx_queue *txq)
+{
+	uint16_t i;
+
+	printf("port %3d, queue %d :\n\n", txq->port_id, txq->queue_id);
+	printf("nb descriptors %d\n", txq->nb_tx_desc);
+
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		volatile struct iavf_tx_data_desc *txd = &txq->tx_ring[i];
+
+		printf("txid %3d - "
+		"QW0: 0x%04"PRIx16" %04"PRIx16" %04"PRIx16" %04"PRIx16", "
+		"QW1: 0x%04"PRIx16" %04"PRIx16" %04"PRIx16" %04"PRIx16"\n",
+	       i, 0, 0, 0, 0, 0, 0, 0,
+	       (const volatile uint16_t)(txd->qw1 & 0xF));
+	}
+}
+
+#endif
+
 /* Offload features */
 union iavf_tx_offload {
 	uint64_t data;
@@ -277,6 +345,8 @@ union iavf_tx_offload {
  * Flex-field 5: AUX1
  */
 struct iavf_32b_rx_flex_desc_comms {
+	union {
+		struct {
 	/* Qword 0 */
 	u8 rxdid;
 	u8 mir_id_umb_cast;
@@ -305,6 +375,101 @@ struct iavf_32b_rx_flex_desc_comms {
 		} flex;
 		__le32 ts_high;
 	} flex_ts;
+		};
+		struct {
+			/* Quad Word 0 */
+
+			u8 rxdid;	/**< Descriptor builder profile ID */
+
+			u8 mirror_id:6;
+			u8 umbcast:2;
+
+			__le16 ptype:10;
+			__le16 flexi_flags_0:6;
+
+			__le16 packet_length:14;
+			__le16 rsv_0:2;
+
+			__le16 hlen:11;
+			__le16 sph:1;
+			__le16 flexi_flags_1:4;
+
+			/* Quad Word 1 */
+			union {
+				__le16 status_error0;
+				struct {
+					__le16 status_error0_dd:1;
+					/* descriptor done */
+					__le16 status_error0_eop:1;
+					/* end of packet */
+					__le16 status_error0_hbo:1;
+					/* header buffer overflow */
+					__le16 status_error0_l3l4p:1;
+					/* l3/l4 integrity check */
+					__le16 status_error0_xsum:4;
+					/* checksum report */
+					__le16 status_error0_lpbk:1;
+					/* loopback */
+					__le16 status_error0_ipv6exadd:1;
+					/* ipv6 w/ dst options or routing hdr */
+					__le16 status_error0_rxe:1;
+					/* rcv mac errors */
+					__le16 status_error0_crcp:1;
+					/* ethernet crc present */
+					__le16 status_error0_rsshash:1;
+					/* rss hash valid */
+					__le16 status_error0_l2tag1p:1;
+					/* l2 tag 1 present */
+					__le16 status_error0_flexi_md0:1;
+					/* flexi md field 0 valid */
+					__le16 status_error0_flexi_md1:1;
+					/* flexi md field 1 valid */
+				};
+			};
+			__le16 l2tag1;
+			__le16 flex_meta0;	/**< flexi metadata field 0 */
+			__le16 flex_meta1;	/**< flexi metadata field 1 */
+
+			/* Quad Word 2 */
+			union {
+				__le16 status_error1;
+				struct {
+					__le16 status_error1_cpm:4;
+					/* Inline IPsec Crypto Status */
+					__le16 status_error1_udp_tunnel:1;
+					/* UDP tunnelled packet NAT-T/UDP-NAT */
+					__le16 status_error1_crypto:1;
+					/* Inline IPsec Crypto Offload */
+					__le16 status_error1_rsv:5;
+					/* Reserved */
+					__le16 status_error1_l2tag2p:1;
+					/* l2 tag 2 present */
+					__le16 status_error1_flexi_md2:1;
+					/* flexi md field 2 valid */
+					__le16 status_error1_flexi_md3:1;
+					/* flexi md field 3 valid */
+					__le16 status_error1_flexi_md4:1;
+					/* flexi md field 4 valid */
+					__le16 status_error1_flexi_md5:1;
+					/* flexi md field 5 valid */
+				};
+			};
+
+			u8 flex_flags2;
+			u8 time_stamp_low;
+
+			__le16 l2tag2_1st;			/**< L2TAG */
+			__le16 l2tag2_2nd;			/**< L2TAG */
+
+			/* Quad Word 3 */
+
+			__le16 flex_meta2;	/**< flexi metadata field 2 */
+			__le16 flex_meta3;	/**< flexi metadata field 3 */
+			__le16 flex_meta4;	/**< flexi metadata field 4 */
+			__le16 flex_meta5;	/**< flexi metadata field 5 */
+
+		} debug;
+	};
 };
 
 /* Rx Flex Descriptor
@@ -347,6 +512,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
 	} flex_ts;
 };
 
+/* Rx Flex Descriptor
+ * RxDID Profile ID 24 Inline IPsec
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: Inline IPsec SAID lower 16-bits
+ * Flex-field 5: Inline IPsec SAID upper 16-bits
+ */
+struct iavf_32b_rx_flex_desc_comms_ipsec {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	__le32 ipsec_said;
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -366,6 +565,7 @@ enum iavf_rxdid {
 	IAVF_RXDID_COMMS_AUX_TCP	= 21,
 	IAVF_RXDID_COMMS_OVS_1		= 22,
 	IAVF_RXDID_COMMS_OVS_2		= 23,
+	IAVF_RXDID_COMMS_IPSEC_CRYPTO	= 24,
 	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
 	IAVF_RXDID_LAST			= 63,
 };
@@ -393,9 +593,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
 
 enum iavf_rx_flex_desc_status_error_1_bits {
 	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
 	/* [10:6] reserved */
 	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
 	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
@@ -405,6 +609,24 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
+
+enum iavf_rx_flex_desc_ipsec_crypto_status {
+	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
+	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
+	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
+	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
+	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
+	/* Reserved */
+	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
+};
+
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)
+
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
@@ -510,8 +732,8 @@ uint16_t iavf_recv_scattered_pkts_vec_avx512_flex_rxd(void *rx_queue,
 						      struct rte_mbuf **rx_pkts,
 						      uint16_t nb_pkts);
 uint16_t iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
-							      struct rte_mbuf **rx_pkts,
-							      uint16_t nb_pkts);
+						struct rte_mbuf **rx_pkts,
+						uint16_t nb_pkts);
 uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 				   uint16_t nb_pkts);
 uint16_t iavf_xmit_pkts_vec_avx512_offload(void *tx_queue,
@@ -523,6 +745,100 @@ uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
 
 const uint32_t *iavf_get_default_ptype_table(void);
 
+static void iavf_dump_rx_flex_desc(const volatile
+		struct iavf_32b_rx_flex_desc_comms *desc)
+{
+	printf("QW0: rxdid          : (0x%x) %d\n", desc->debug.rxdid,
+			desc->debug.rxdid);
+	printf("QW0: mirror id      : %d\n", desc->debug.mirror_id);
+	printf("QW0: umbcast id     : %d\n", desc->debug.umbcast);
+	printf("QW0: mirror id      : (0x%x) %d\n", desc->debug.ptype,
+			desc->debug.ptype);
+	printf("QW0: flexi flags 0  : %x\n", desc->debug.flexi_flags_0);
+	printf("QW0: packet len     : %d\n", desc->debug.packet_length);
+	printf("QW0: header len     : %d\n", desc->debug.hlen);
+	printf("QW0: sph len        : %d\n", desc->debug.sph);
+	printf("QW0: flexi flags 1  : %x\n", desc->debug.flexi_flags_1);
+
+
+	printf("QW1: status/error 0 : 0x%x\n", desc->debug.status_error0);
+
+	printf("QW1: status/error 0 - dd         : 0x%x\n",
+			desc->debug.status_error0_dd);
+	printf("QW1: status/error 0 - eop        : 0x%x\n",
+			desc->debug.status_error0_eop);
+	printf("QW1: status/error 0 - hbo        : 0x%x\n",
+			desc->debug.status_error0_hbo);
+	printf("QW1: status/error 0 - l3l4p      : 0x%x\n",
+			desc->debug.status_error0_l3l4p);
+	printf("QW1: status/error 0 - xsum       : 0x%x\n",
+			desc->debug.status_error0_xsum);
+	printf("QW1: status/error 0 - lpbk       : 0x%x\n",
+			desc->debug.status_error0_lpbk);
+	printf("QW1: status/error 0 - ipv6extadd : 0x%x\n",
+			desc->debug.status_error0_ipv6exadd);
+	printf("QW1: status/error 0 - rxe        : 0x%x\n",
+			desc->debug.status_error0_rxe);
+	printf("QW1: status/error 0 - crcp       : 0x%x\n",
+			desc->debug.status_error0_crcp);
+	printf("QW1: status/error 0 - rsshash    : 0x%x\n",
+			desc->debug.status_error0_rsshash);
+	printf("QW1: status/error 0 - l2tag 1 p  : 0x%x\n",
+			desc->debug.status_error0_l2tag1p);
+	printf("QW1: status/error 0 - flexi md 0 : 0x%x\n",
+			desc->debug.status_error0_flexi_md0);
+	printf("QW1: status/error 0 - flexi md 1 : 0x%x\n",
+			desc->debug.status_error0_flexi_md1);
+
+	printf("QW1: l2tag1     : %d\n",
+		desc->debug.status_error0_l2tag1p ? desc->debug.l2tag1 : 0);
+	printf("QW1: flexi md 0 : 0x%x\n",
+		desc->debug.status_error0_flexi_md0 ?
+				desc->debug.flex_meta0 : 0);
+	printf("QW1: flexi md 1 : 0x%x\n",
+			desc->debug.status_error0_flexi_md1 ?
+					desc->debug.flex_meta1 : 0);
+
+
+	printf("QW2: status/error 1 : 0x%x\n", desc->debug.status_error1);
+
+	printf("QW2: status/error 1 - cpm status : 0x%x\n",
+			desc->debug.status_error1_cpm);
+	printf("QW2: status/error 1 - udp tunnel : 0x%x\n",
+			desc->debug.status_error1_udp_tunnel);
+	printf("QW2: status/error 1 - crypto     : 0x%x\n",
+			desc->debug.status_error1_crypto);
+	printf("QW2: status/error 1 - l2tag 2 p  : 0x%x\n",
+			desc->debug.status_error1_l2tag2p);
+	printf("QW2: status/error 1 - flexi md 2 : 0x%x\n",
+			desc->debug.status_error1_flexi_md2);
+	printf("QW2: status/error 1 - flexi md 3 : 0x%x\n",
+			desc->debug.status_error1_flexi_md3);
+	printf("QW2: status/error 1 - flexi md 4 : 0x%x\n",
+			desc->debug.status_error1_flexi_md4);
+	printf("QW2: status/error 1 - flexi md 5 : 0x%x\n",
+			desc->debug.status_error1_flexi_md5);
+
+
+	printf("QW2: flexi flags 2  : 0x%x\n", desc->debug.flex_flags2);
+	printf("QW2: timestamp low  : 0x%x\n", desc->debug.time_stamp_low);
+	printf("QW2: l2tag2_1       : 0x%x\n", desc->debug.l2tag2_1st);
+	printf("QW2: l2tag2_2       : 0x%x\n", desc->debug.l2tag2_2nd);
+
+	printf("QW3: flexi md 2     : 0x%x\n",
+			desc->debug.status_error1_flexi_md2 ?
+					desc->debug.flex_meta2 : 0);
+	printf("QW3: flexi md 3     : 0x%x\n",
+			desc->debug.status_error1_flexi_md3 ?
+					desc->debug.flex_meta3 : 0);
+	printf("QW3: flexi md 4     : 0x%x\n",
+			desc->debug.status_error1_flexi_md4 ?
+					desc->debug.flex_meta4 : 0);
+	printf("QW3: flexi md 5     : 0x%x\n",
+			desc->debug.status_error1_flexi_md5 ?
+					desc->debug.flex_meta5 : 0);
+}
+
 static inline
 void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
 			    const volatile void *desc,
@@ -541,9 +857,235 @@ void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
 	       " QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", rxq->queue_id,
 	       rx_id, rx_desc->read.pkt_addr, rx_desc->read.hdr_addr,
 	       rx_desc->read.rsvd1, rx_desc->read.rsvd2);
+
+	iavf_dump_rx_flex_desc(desc);
 #endif
 }
 
+static uint8_t cipherblock_sz(uint8_t blksz)
+{
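+	/* decode the descriptor cipher block size selector into bytes:
+	 * 2 -> 8 bytes, 3 -> 16 bytes; other values are unknown and map to 0
+	 */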
+	switch (blksz) {
+	case 2:
+		return 8;
+	case 3:
+		return 16;
+	}
+
+	return 0;
+}
+
+static void iavf_dump_tx_ctx_desc(const volatile
+		struct iavf_tx_context_desc *desc)
+{
+	struct iavf_tx_context_desc ctx;
+
+	ctx.qw0 = rte_le_to_cpu_64(desc->qw0);
+	ctx.qw1 = rte_le_to_cpu_64(desc->qw1);
+
+	const char *eipt, *l4tunt;
+
+	const char *eipt_no_exip = "no_exip";
+	const char *eipt_ip6 = "ip6";
+	const char *eipt_ip4_no_checksum = "ip4_no_checksum";
+	const char *eipt_ip4_w_checksum = "ip4_w_checksum";
+
+	const char *l4tunt_no_udp_gre = "no_udp_gre";
+	const char *l4tunt_udp = "udp";
+	const char *l4tunt_gre = "gre";
+
+	switch (ctx.debug.tunneling & 0x3) {
+	case 1:
+		eipt = eipt_ip6;
+		break;
+	case 2:
+		eipt = eipt_ip4_no_checksum;
+		break;
+	case 3:
+		eipt = eipt_ip4_w_checksum;
+		break;
+	default:
+		eipt = eipt_no_exip;
+	}
+
+	switch ((ctx.debug.tunneling & 0x600) >> 9) {
+	case 0:
+		l4tunt = l4tunt_no_udp_gre;
+		break;
+	case 1:
+		l4tunt = l4tunt_udp;
+		break;
+	case 2:
+		l4tunt = l4tunt_gre;
+		break;
+	default:
+		l4tunt = "invalid value set for l4 tunnel type ";
+	}
+
+	printf("QW0: Tunnel EIPT : (%d) %s\n", ctx.debug.tunneling & 0x3, eipt);
+	printf("QW0: Tunnel EIPLEN : %d\n",
+			(uint32_t)(((ctx.debug.tunneling >>
+				IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT) &
+				IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK) << 2));
+	printf("QW0: Tunnel EIP_NOINC : %d\n",
+			(ctx.debug.tunneling >> 11) & 0x1);
+
+	printf("QW0: Tunnel L4TUNT : (%d) %s\n",
+			(ctx.debug.tunneling & 0x600) >> 9, l4tunt);
+	printf("QW0: Tunnel L4TUNLEN : (%d)\n",
+			(ctx.debug.tunneling >> 12) & 0x7F);
+
+	printf("QW0: Tunnel DEC Inner TTL : %d\n", 0);
+	printf("QW0: Tunnel UDP Checksum : %d\n", 0);
+
+	printf("QW0: L2TAG1 : %d\n", ctx.l2tag2);
+
+	printf("QW1: DTYP: %d\n", ctx.debug.type);
+
+	printf("QW1: Cmd TSO          : %x\n", (ctx.debug.cmd >> 0) & 0x1);
+	printf("QW1: Cmd TSYN         : %x\n", (ctx.debug.cmd >> 1) & 0x1);
+	printf("QW1: Cmd IL2TAG2      : %x\n", (ctx.debug.cmd >> 2) & 0x1);
+	printf("QW1: Cmd IL2TAG2_IL2H : %x\n", (ctx.debug.cmd >> 3) & 0x1);
+	printf("QW1: Cmd SWITCH       : %x\n", (ctx.debug.cmd >> 4) & 0x3);
+
+	printf("QW1: IPsec Cipher Block Sz: %d\n",
+			cipherblock_sz(ctx.debug.ipsec & 0x7));
+	printf("QW1: IPsec ICV Sz         : %d\n", (ctx.debug.ipsec >> 3) << 2);
+
+	printf("QW1: TLength: %d\n", ctx.debug.tlen_tsyn);
+	printf("QW1: MSS: %d\n", ctx.debug.mss_target_vsi);
+}
+
+#include <netinet/in.h>
+
+static const char *ipproto_to_str(uint8_t ipproto)
+{
+	switch (ipproto) {
+	case IPPROTO_IP:
+		return "Dummy";
+	case IPPROTO_IPIP:
+		return "IPIP";
+	case IPPROTO_TCP:
+		return "TCP";
+	case IPPROTO_UDP:
+		return "UDP";
+	case IPPROTO_ESP:
+		return "ESP";
+	case IPPROTO_AH:
+		return "AH";
+	case IPPROTO_IPV6:
+		return "IPV6";
+	case IPPROTO_SCTP:
+		return "SCTP";
+	case IPPROTO_RAW:
+		return "RAW";
+	}
+
+	return "Unknown";
+}
+
+static void iavf_dump_tx_ipsec_desc(const volatile
+		struct iavf_tx_ipsec_desc *desc)
+{
+	struct iavf_tx_ipsec_desc ipsec;
+	uint16_t ivlen = 0;
+
+	ipsec.qw0 = rte_le_to_cpu_64(desc->qw0);
+	ipsec.qw1 = rte_le_to_cpu_64(desc->qw1);
+
+	switch (ipsec.ivlen) {
+	case 1:
+		ivlen = 4;
+		break;
+	case 2:
+		ivlen = 8;
+		break;
+	case 3:
+		ivlen = 16;
+		break;
+	}
+
+	printf("QW0: L4 Payload Length: %d\n", ipsec.l4payload_length);
+	printf("QW0: ESN : %d\n", ipsec.esn);
+	printf("QW0: ESP Trailer Length: %d\n", ipsec.trailer_length);
+
+	printf("QW1: DTYP: %d\n", ipsec.type);
+	printf("QW1: UDP: %s\n", ipsec.udp ? "yes" : "no");
+	printf("QW1: IV Length: %d\n", ivlen);
+	printf("QW1: Next Proto: (%d) %s\n", ipsec.next_header,
+			ipproto_to_str(ipsec.next_header));
+	printf("QW1: IPv6 Extension Headers Length: %d\n",
+			ipsec.ipv6_ext_hdr_length);
+	printf("QW1: SAID: %d\n", ipsec.said);
+}
+
+static const char *iipt_to_str(uint8_t iipt)
+{
+	switch (iipt) {
+	case 0:
+		return "Non IP packet / not defined";
+	case 1:
+		return "IPv6";
+	case 2:
+		return "IPv4 w/ no IP Checksum";
+	case 3:
+		return "IPv4 w/ IP Checksum";
+	}
+
+	return "";
+}
+
+static const char *l4t_to_str(uint8_t l4t)
+{
+	switch (l4t) {
+	case 0:
+		return "unknown / fragment";
+	case 1:
+		return "TCP";
+	case 2:
+		return "SCTP";
+	case 3:
+		return "UDP";
+	}
+
+	return "";
+}
+
+static void iavf_dump_tx_data_desc(const volatile struct iavf_tx_desc *desc)
+{
+	struct iavf_tx_desc data;
+
+
+	data.qw0 = rte_le_to_cpu_64(desc->qw0);
+	data.qw1 = rte_le_to_cpu_64(desc->qw1);
+
+	printf("QW0: Buffer Address : 0x%016"PRIx64"\n",
+			data.debug.buffer_addr);
+
+	printf("QW1: Dtype : %d\n", data.debug.type);
+
+	printf("QW1: Cmd : %x\n", data.debug.cmd);
+	printf("QW1: Cmd EOP     : %x\n", (data.debug.cmd >> 0) & 0x1);
+	printf("QW1: Cmd RS      : %x\n", (data.debug.cmd >> 1) & 0x1);
+	printf("QW1: Cmd RSV     : %x\n", (data.debug.cmd >> 2) & 0x1);
+	printf("QW1: Cmd IL2TAG1 : %x\n", (data.debug.cmd >> 3) & 0x1);
+	printf("QW1: Cmd DUMMY   : %x\n", (data.debug.cmd >> 4) & 0x1);
+	printf("QW1: Cmd IIPT    : (%x) %s\n", (data.debug.cmd >> 5) & 0x3,
+			iipt_to_str((data.debug.cmd >> 5) & 0x3));
+	printf("QW1: Cmd RSV     : %x\n", (data.debug.cmd >> 7) & 0x1);
+	printf("QW1: Cmd L4T     : (%x) %s\n", (data.debug.cmd >> 8) & 0x3,
+			l4t_to_str((data.debug.cmd >> 8) & 0x3));
+	printf("QW1: Cmd RE      : %x\n", (data.debug.cmd >> 10) & 0x1);
+	printf("QW1: Cmd RSV     : %x\n", (data.debug.cmd >> 11) & 0x1);
+
+	printf("QW1: Offset L2  : %d\n", data.debug.offset_l2len << 1);
+	printf("QW1: Offset L3  : %d\n", data.debug.offset_l3len << 2);
+	printf("QW1: Offset L4  : %d\n", data.debug.offset_l4len << 2);
+
+	printf("QW1: Tx Buf Sz  : %d\n", data.debug.buffer_sz);
+
+	printf("QW1: l2tag1 : %d\n", data.debug.l2tag1);
+}
+
 /* All the descriptors are 16 bytes, so just use one of them
  * to print the qwords
  */
@@ -555,24 +1097,29 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	const volatile struct iavf_tx_desc *tx_desc = desc;
 	enum iavf_tx_desc_dtype_value type;
 
-	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
-		tx_desc->cmd_type_offset_bsz &
-		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+
+	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(tx_desc->qw1 &
+			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
 	switch (type) {
 	case IAVF_TX_DESC_DTYPE_DATA:
-		name = "Tx_data_desc";
+		name = "Data Tx Desc: ";
+		iavf_dump_tx_data_desc(desc);
 		break;
 	case IAVF_TX_DESC_DTYPE_CONTEXT:
-		name = "Tx_context_desc";
+		name = "Context Tx Desc: ";
+		iavf_dump_tx_ctx_desc(desc);
+		break;
+	case IAVF_TX_DESC_DTYPE_IPSEC:
+		name = "IPsec Tx Desc: ";
+		iavf_dump_tx_ipsec_desc(desc);
 		break;
 	default:
-		name = "unknown_desc";
+		name = "Unknown Tx Desc: ";
 		break;
 	}
 
 	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
-	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
-	       tx_desc->cmd_type_offset_bsz);
+		txq->queue_id, name, tx_id, tx_desc->qw0, tx_desc->qw1);
 }
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index bf87696fa4..9f6658d9d7 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -363,10 +363,12 @@ static inline void
 flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
 		     const uint32_t *type_table)
 {
-	const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+	const __m128i ptype_mask = _mm_set_epi16(
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
+
 	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
 	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
 	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 06dc663947..13365b5c2b 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -23,8 +23,8 @@
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
-#define MAX_TRY_TIMES 200
-#define ASQ_DELAY_MS  10
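+/* poll the PF admin queue every 1 ms, up to 2000 attempts (~2 s total) */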
+#define MAX_TRY_TIMES 2000
+#define ASQ_DELAY_MS  1
 
 static uint32_t
 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
@@ -143,7 +143,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 }
 
 static int
-iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args,
+	int async)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
@@ -155,8 +156,14 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (vf->vf_reset)
 		return -EIO;
 
-	if (_atomic_set_cmd(vf, args->ops))
-		return -1;
+
+	if (async) {
+		if (_atomic_set_async_response_cmd(vf, args->ops))
+			return -1;
+	} else {
+		if (_atomic_set_cmd(vf, args->ops))
+			return -1;
+	}
 
 	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
@@ -252,9 +259,11 @@ static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
@@ -330,18 +339,40 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
 		case iavf_aqc_opc_send_msg_to_vf:
 			if (msg_opc == VIRTCHNL_OP_EVENT) {
 				iavf_handle_pf_event_msg(dev, info.msg_buf,
-							info.msg_len);
+						info.msg_len);
 			} else {
+				/* check for inline IPsec events */
+				struct inline_ipsec_msg *imsg =
+					(struct inline_ipsec_msg *)info.msg_buf;
+				struct rte_eth_event_ipsec_desc desc;
+				if (msg_opc == VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
+					&& imsg->ipsec_opcode ==
+						INLINE_IPSEC_OP_EVENT) {
+					struct virtchnl_ipsec_event *ev =
+							imsg->ipsec_data.event;
+					desc.subtype =
+						RTE_ETH_EVENT_IPSEC_UNKNOWN;
+					desc.metadata = ev->ipsec_event_data;
+					rte_eth_dev_callback_process(dev,
+							RTE_ETH_EVENT_IPSEC,
+							&desc);
+					return;
+				}
+
 				/* read message and it's expected one */
-				if (msg_opc == vf->pend_cmd)
-					_notify_cmd(vf, msg_ret);
-				else
-					PMD_DRV_LOG(ERR, "command mismatch,"
-						    "expect %u, get %u",
-						    vf->pend_cmd, msg_opc);
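+				/* for commands expecting multiple responses,
+				 * wake the blocked caller only when the last
+				 * one has been received
+				 */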
+				if (msg_opc == vf->pend_cmd) {
+					rte_atomic32_dec(&vf->pend_cmd_count);
+					if (rte_atomic32_read(
+						&vf->pend_cmd_count) == 0)
+						_notify_cmd(vf, msg_ret);
+				} else {
+					PMD_DRV_LOG(ERR,
+					"command mismatch, expect %u, get %u",
+						vf->pend_cmd, msg_opc);
+				}
 				PMD_DRV_LOG(DEBUG,
-					    "adminq response is received,"
-					    " opcode = %d", msg_opc);
+				"adminq response is received, opcode = %d",
+						msg_opc);
 			}
 			break;
 		default:
@@ -365,7 +396,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_ENABLE_VLAN_STRIPPING");
@@ -386,7 +417,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_DISABLE_VLAN_STRIPPING");
@@ -415,7 +446,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
 		return err;
@@ -468,12 +499,13 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
-		VIRTCHNL_VF_OFFLOAD_QOS;
+		VIRTCHNL_VF_OFFLOAD_QOS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -518,7 +550,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
@@ -562,7 +594,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_strip);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
@@ -602,7 +634,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_insert);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
@@ -645,7 +677,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(vlan_filter);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");
@@ -666,7 +698,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
@@ -697,7 +729,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES");
@@ -725,7 +757,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES");
@@ -758,7 +790,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
@@ -800,7 +832,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
@@ -844,7 +876,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
@@ -890,7 +922,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
@@ -922,7 +954,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
@@ -954,7 +986,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
@@ -1046,7 +1078,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
@@ -1087,7 +1119,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
 
@@ -1128,7 +1160,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
 
@@ -1190,7 +1222,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		args.in_args_size = len;
 		args.out_buffer = vf->aq_resp;
 		args.out_size = IAVF_AQ_BUF_SZ;
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		if (err)
 			PMD_DRV_LOG(ERR, "fail to execute command %s",
 				    add ? "OP_ADD_ETHER_ADDRESS" :
@@ -1217,7 +1249,7 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		*pstats = NULL;
@@ -1252,7 +1284,7 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1292,7 +1324,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
@@ -1319,7 +1351,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
@@ -1346,7 +1378,7 @@ iavf_fdir_add(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
 		return err;
@@ -1406,7 +1438,7 @@ iavf_fdir_del(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
 		return err;
@@ -1453,7 +1485,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
 		return err;
@@ -1494,7 +1526,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of %s",
@@ -1517,7 +1549,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
@@ -1543,7 +1575,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_SET_RSS_HENA");
@@ -1564,7 +1596,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1597,7 +1629,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_TC_MAP");
@@ -1647,7 +1679,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 		i * sizeof(struct virtchnl_ether_addr);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
@@ -1692,7 +1724,7 @@ iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
 	 * before iavf_read_msg_from_pf.
 	 */
 	rte_intr_disable(&pci_dev->intr_handle);
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	rte_intr_enable(&pci_dev->intr_handle);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
@@ -1728,7 +1760,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
 		return err;
@@ -1741,3 +1773,33 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 
 	return 0;
 }
+
+
+
+int
+iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
+	args.in_args = msg;
+	args.in_args_size = msg_len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 1);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+				"OP_INLINE_IPSEC_CRYPTO");
+		return err;
+	}
+
+	memcpy(resp_msg, args.out_buffer, resp_msg_len);
+
+	return 0;
+}
+
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index f2010a8337..385770b043 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -10,7 +10,7 @@ endif
 cflags += ['-Wno-strict-aliasing']
 
 includes += include_directories('../../common/iavf')
-deps += ['common_iavf']
+deps += ['common_iavf', 'security', 'cryptodev']
 
 sources = files(
         'iavf_ethdev.c',
@@ -20,6 +20,7 @@ sources = files(
         'iavf_fdir.c',
         'iavf_hash.c',
         'iavf_tm.c',
+        'iavf_ipsec_crypto.c',
 )
 
 if arch_subdir == 'x86'
diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
index 3a045040f1..7426eb9be3 100644
--- a/drivers/net/iavf/rte_pmd_iavf.h
+++ b/drivers/net/iavf/rte_pmd_iavf.h
@@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 /**
  * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH 3/4] net/iavf: Add xstats support for inline IPsec crypto
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-09-09 14:24 ` [dpdk-dev] [PATCH 1/4] common/iavf: " Radu Nicolau
  2021-09-09 14:24 ` [dpdk-dev] [PATCH 2/4] net/iavf: " Radu Nicolau
@ 2021-09-09 14:24 ` Radu Nicolau
  2021-09-09 14:24 ` [dpdk-dev] [PATCH 4/4] net/iavf: add watchdog for VFLR Radu Nicolau
                   ` (13 subsequent siblings)
  16 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-09 14:24 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Add per-queue counters maintaining statistics for the inline IPsec
crypto offload. The counters can be retrieved through
rte_security_session_stats_get(), with more detailed error counters
exposed through the rte_ethdev xstats API.
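
For reference, a minimal sketch (not part of the patch) of how an
application could read the new per-port inline IPsec counters through
the generic xstats API; port id selection and error handling are
simplified:

#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
dump_inline_ipsec_xstats(uint16_t port_id)
{
	int n = rte_eth_xstats_get_names(port_id, NULL, 0);

	if (n <= 0)
		return;

	struct rte_eth_xstat_name names[n];
	struct rte_eth_xstat xstats[n];

	rte_eth_xstats_get_names(port_id, names, n);
	rte_eth_xstats_get(port_id, xstats, n);

	for (int i = 0; i < n; i++) {
		/* only print the counters added by this patch */
		if (strncmp(names[i].name, "inline_ipsec_crypto_", 20) == 0)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, xstats[i].value);
	}
}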

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h        | 21 ++++++++-
 drivers/net/iavf/iavf_ethdev.c | 84 ++++++++++++++++++++++++++++------
 drivers/net/iavf/iavf_rxtx.h   | 12 -----
 3 files changed, 89 insertions(+), 28 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 934ef48278..d5f574b4b3 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -92,6 +92,25 @@ struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
 
+
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_eth_xstats {
+	struct virtchnl_eth_stats eth_stats;
+	struct iavf_ipsec_crypto_stats ips_stats;
+};
+
 /* Structure that defines a VSI, associated with a adapter. */
 struct iavf_vsi {
 	struct iavf_adapter *adapter; /* Backreference to associated adapter */
@@ -101,7 +120,7 @@ struct iavf_vsi {
 	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
 	uint16_t base_vector;
 	uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
-	struct virtchnl_eth_stats eth_stats_offset;
+	struct iavf_eth_xstats eth_stats_offset;
 };
 
 struct rte_flow;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d4f5d123e2..2a747e54a6 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -89,6 +89,7 @@ static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);
 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n);
 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -144,21 +145,37 @@ struct rte_iavf_xstats_name_off {
 	unsigned int offset;
 };
 
+#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
-	{"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
-	{"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
-	{"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
-	{"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
-	{"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
+	{"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
+	{"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
+	{"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
+	{"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
+	{"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
 	{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
 		rx_unknown_protocol)},
-	{"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
-	{"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
-	{"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
-	{"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
-	{"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
-	{"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
+	{"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
+	{"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
+	{"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
+	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
+	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
+	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+
+	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
+	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
+	{"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
+	{"inline_ipsec_crypto_ierrors_sad_lookup",
+			_OFF_OF(ips_stats.ierrors.sad_miss)},
+	{"inline_ipsec_crypto_ierrors_not_processed",
+			_OFF_OF(ips_stats.ierrors.not_processed)},
+	{"inline_ipsec_crypto_ierrors_icv_fail",
+			_OFF_OF(ips_stats.ierrors.icv_check)},
+	{"inline_ipsec_crypto_ierrors_length",
+			_OFF_OF(ips_stats.ierrors.ipsec_length)},
+	{"inline_ipsec_crypto_ierrors_misc",
+			_OFF_OF(ips_stats.ierrors.misc)},
 };
+#undef _OFF_OF
 
 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
 		sizeof(rte_iavf_stats_strings[0]))
@@ -176,7 +193,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.stats_reset                = iavf_dev_stats_reset,
 	.xstats_get                 = iavf_dev_xstats_get,
 	.xstats_get_names           = iavf_dev_xstats_get_names,
-	.xstats_reset               = iavf_dev_stats_reset,
+	.xstats_reset               = iavf_dev_xstats_reset,
 	.promiscuous_enable         = iavf_dev_promiscuous_enable,
 	.promiscuous_disable        = iavf_dev_promiscuous_disable,
 	.allmulticast_enable        = iavf_dev_allmulticast_enable,
@@ -1552,7 +1569,7 @@ iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
 static void
 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
 {
-	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
 
 	iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
 	iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
@@ -1614,7 +1631,18 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev)
 		return ret;
 
 	/* set stats offset base on current values */
-	vsi->eth_stats_offset = *pstats;
+	vsi->eth_stats_offset.eth_stats = *pstats;
+
+	return 0;
+}
+
+static int
+iavf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_dev_stats_reset(dev);
+	memset(&vf->vsi.eth_stats_offset, 0, sizeof(struct iavf_eth_xstats));
 
 	return 0;
 }
@@ -1634,6 +1662,27 @@ static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return IAVF_NB_XSTATS;
 }
 
+static void
+iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
+		struct iavf_ipsec_crypto_stats *ips)
+{
+	uint16_t idx;
+	for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
+		struct iavf_rx_queue *rxq;
+		struct iavf_ipsec_crypto_stats *stats;
+		rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
+		stats = &rxq->stats.ipsec_crypto;
+		ips->icount += stats->icount;
+		ips->ibytes += stats->ibytes;
+		ips->ierrors.count += stats->ierrors.count;
+		ips->ierrors.sad_miss += stats->ierrors.sad_miss;
+		ips->ierrors.not_processed += stats->ierrors.not_processed;
+		ips->ierrors.icv_check += stats->ierrors.icv_check;
+		ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
+		ips->ierrors.misc += stats->ierrors.misc;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1644,6 +1693,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_vsi *vsi = &vf->vsi;
 	struct virtchnl_eth_stats *pstats = NULL;
+	struct iavf_eth_xstats iavf_xtats = {0};
 
 	if (n < IAVF_NB_XSTATS)
 		return IAVF_NB_XSTATS;
@@ -1656,11 +1706,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 		return 0;
 
 	iavf_update_stats(vsi, pstats);
+	iavf_xtats.eth_stats = *pstats;
+
+	if (iavf_ipsec_crypto_supported(adapter))
+		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
-		xstats[i].value = *(uint64_t *)(((char *)pstats) +
+		xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
 			rte_iavf_stats_strings[i].offset);
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 9852a89194..5bdd43bcc0 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -165,18 +165,6 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
-struct iavf_ipsec_crypto_stats {
-	uint64_t icount;
-	uint64_t ibytes;
-	struct {
-		uint64_t count;
-		uint64_t sad_miss;
-		uint64_t not_processed;
-		uint64_t icv_check;
-		uint64_t ipsec_length;
-		uint64_t misc;
-	} ierrors;
-};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH 4/4] net/iavf: add watchdog for VFLR
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (2 preceding siblings ...)
  2021-09-09 14:24 ` [dpdk-dev] [PATCH 3/4] net/iavf: Add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-09-09 14:24 ` Radu Nicolau
  2021-09-15 13:32 ` [dpdk-dev] [PATCH v2 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (12 subsequent siblings)
  16 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-09 14:24 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Add a watchdog to the iAVF PMD that monitors the VFLR register. If
the device is not already in reset and a VF reset in progress is
detected, notify the user through a callback and put the device into
the reset state. If the device is already in reset, poll for
completion of the reset.
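
As an aside, a hedged sketch (not part of the patch) of how an
application can be notified of the VF reset event that the watchdog
raises on a detected VFLR; the recovery policy itself is application
specific:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static int
vf_reset_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		  void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (event == RTE_ETH_EVENT_INTR_RESET)
		printf("port %u: VF reset detected, schedule recovery\n",
		       port_id);

	return 0;
}

static void
register_vf_reset_handler(uint16_t port_id)
{
	/* the watchdog delivers the VFLR notification via this event */
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
				      vf_reset_event_cb, NULL);
}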

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h        |  6 +++
 drivers/net/iavf/iavf_ethdev.c | 97 ++++++++++++++++++++++++++++++++++
 2 files changed, 103 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index d5f574b4b3..4481d2e134 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -212,6 +212,12 @@ struct iavf_info {
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
+	struct {
+		uint8_t enabled:1;
+		uint64_t period_us;
+	} watchdog;
+	/** iAVF watchdog configuration */
+
 	/* Event from pf */
 	bool dev_closed;
 	bool link_up;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 2a747e54a6..e06b86b982 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -24,6 +24,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_alarm.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
@@ -239,6 +240,94 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+
+static int
+iavf_vfr_inprogress(struct iavf_hw *hw)
+{
+	int inprogress = 0;
+
+	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
+		VIRTCHNL_VFR_INPROGRESS)
+		inprogress = 1;
+
+	if (inprogress)
+		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
+
+	return inprogress;
+}
+
+static void
+iavf_dev_watchdog(void *cb_arg)
+{
+	struct iavf_adapter *adapter = cb_arg;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	int vfr_inprogress = 0, rc = 0;
+
+	/* check if watchdog has been disabled since last call */
+	if (!adapter->vf.watchdog.enabled)
+		return;
+
+	/* If in reset then poll vfr_inprogress register for completion */
+	if (adapter->vf.vf_reset) {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (!vfr_inprogress) {
+			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+				adapter->eth_dev->data->name);
+			adapter->vf.vf_reset = false;
+		}
+	/* If not in reset then poll vfr_inprogress register for VFLR event */
+	} else {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (vfr_inprogress) {
+			PMD_DRV_LOG(INFO,
+				"VF \"%s\" reset event has been detected by watchdog",
+				adapter->eth_dev->data->name);
+
+			/* enter reset state with VFLR event */
+			adapter->vf.vf_reset = true;
+
+			rte_eth_dev_callback_process(adapter->eth_dev,
+				RTE_ETH_EVENT_INTR_RESET, NULL);
+		}
+	}
+
+	/* re-alarm watchdog */
+	rc = rte_eal_alarm_set(adapter->vf.watchdog.period_us,
+			&iavf_dev_watchdog, cb_arg);
+
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed \"%s\" to reset device watchdog alarm",
+			adapter->eth_dev->data->name);
+}
+
+static void
+iavf_dev_watchdog_enable(struct iavf_adapter *adapter, uint64_t period_us)
+{
+	int rc;
+
+	PMD_DRV_LOG(INFO, "Enabling device watchdog");
+
+	adapter->vf.watchdog.enabled = 1;
+	adapter->vf.watchdog.period_us = period_us;
+
+	rc = rte_eal_alarm_set(adapter->vf.watchdog.period_us,
+			&iavf_dev_watchdog, (void *)adapter);
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed to enabled device watchdog");
+}
+
+static void
+iavf_dev_watchdog_disable(struct iavf_adapter *adapter)
+{
+	PMD_DRV_LOG(INFO, "Disabling device watchdog");
+
+	adapter->vf.watchdog.enabled = 0;
+	adapter->vf.watchdog.period_us = 0;
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
@@ -2432,6 +2521,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	iavf_default_rss_disable(adapter);
 
+
+	/* Start device watchdog, set polling period to 500us */
+	iavf_dev_watchdog_enable(adapter, 500);
+
+
 	return 0;
 }
 
@@ -2502,6 +2596,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
 		vf->vf_reset = false;
 
+	/* disable watchdog */
+	iavf_dev_watchdog_disable(adapter);
+
 	return ret;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v2 0/4] iavf: add iAVF IPsec inline crypto support
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (3 preceding siblings ...)
  2021-09-09 14:24 ` [dpdk-dev] [PATCH 4/4] net/iavf: add watchdog for VFLR Radu Nicolau
@ 2021-09-15 13:32 ` Radu Nicolau
  2021-09-15 13:32   ` [dpdk-dev] [PATCH v2 1/4] common/iavf: " Radu Nicolau
                     ` (3 more replies)
  2021-09-20 13:51 ` [dpdk-dev] [PATCH v3 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (11 subsequent siblings)
  16 siblings, 4 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-15 13:32 UTC (permalink / raw)
  Cc: dev, declan.doherty, abhijit.sinha, jingjing.wu, qi.z.zhang,
	beilei.xing, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as support for offloading
ESP over UDP, in conjunction with TSO for UDP and TCP flows.

Radu Nicolau (4):
  common/iavf: add iAVF IPsec inline crypto support
  net/iavf: add iAVF IPsec inline crypto support
  net/iavf: Add xstats support for inline IPsec crypto
  net/iavf: add watchdog for VFLR

 drivers/common/iavf/iavf_type.h               |  215 +-
 drivers/common/iavf/virtchnl.h                |   17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
 drivers/net/iavf/iavf.h                       |   53 +-
 drivers/net/iavf/iavf_ethdev.c                |  222 +-
 drivers/net/iavf/iavf_generic_flow.c          |   16 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1918 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |   96 +
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  729 +++++--
 drivers/net/iavf/iavf_rxtx.h                  |  567 ++++-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  166 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 17 files changed, 4615 insertions(+), 339 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

-- 
v2: small updates and fixes in the flow-related section

2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v2 1/4] common/iavf: add iAVF IPsec inline crypto support
  2021-09-15 13:32 ` [dpdk-dev] [PATCH v2 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-09-15 13:32   ` Radu Nicolau
  2021-09-15 13:32   ` [dpdk-dev] [PATCH v2 2/4] net/iavf: " Radu Nicolau
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-15 13:32 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Add support for inline crypto for IPsec.
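
For illustration only (not part of the patch), a sketch of how a
request for the new INLINE_IPSEC_OP_SA_CREATE opcode could be sized
and populated using the structures added below; all field values are
placeholders and the actual driver-side usage is introduced in the
net/iavf patch:

#include <stdint.h>
#include <stdlib.h>
#include "virtchnl_inline_ipsec.h"	/* added by this patch */

static struct inline_ipsec_msg *
build_sa_create_request(uint32_t spi, uint16_t *len)
{
	struct inline_ipsec_msg *msg;
	struct virtchnl_ipsec_sa_cfg *sa;

	/* header plus one SA config, as computed by the validation helper */
	*len = virtchnl_inline_ipsec_val_msg_len(INLINE_IPSEC_OP_SA_CREATE);
	msg = calloc(1, *len);
	if (msg == NULL)
		return NULL;

	msg->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
	msg->req_id = 1;			/* placeholder request id */

	sa = msg->ipsec_data.sa_cfg;
	sa->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
	sa->virtchnl_direction = VIRTCHNL_DIR_EGRESS;
	sa->virtchnl_ip_type = VIRTCHNL_IPV4;
	sa->spi = spi;
	/* crypto_cfg (keys, algorithms) omitted for brevity */

	return msg;
}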

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/common/iavf/iavf_type.h             | 215 +++++++-
 drivers/common/iavf/virtchnl.h              |  17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h | 553 ++++++++++++++++++++
 3 files changed, 775 insertions(+), 10 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 73dfb47e70..1f8f8ae5fd 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -709,11 +709,29 @@ enum iavf_rx_prog_status_desc_error_bits {
 #define IAVF_FOUR_BIT_MASK	0xF
 #define IAVF_EIGHTEEN_BIT_MASK	0x3FFFF
 
-/* TX Descriptor */
+/* TX Data Descriptor */
 struct iavf_tx_desc {
-	__le64 buffer_addr; /* Address of descriptor's data buf */
-	__le64 cmd_type_offset_bsz;
-};
+	union {
+		struct {
+			__le64 buffer_addr; /* Addr of descriptor's data buf */
+			__le64 cmd_type_offset_bsz;
+		};
+		struct {
+			__le64 qw0; /**< data buffer address */
+			__le64 qw1; /**< dtyp, cmd, offset, buf_sz and l2tag1 */
+		};
+		struct {
+			__le64 buffer_addr;	/**< Data buffer address */
+			__le64 type:4;		/**< Descriptor type */
+			__le64 cmd:12;		/**< Command field */
+			__le64 offset_l2len:7;	/**< L2 header length */
+			__le64 offset_l3len:7;	/**< L3 header length */
+			__le64 offset_l4len:4;	/**< L4 header length */
+			__le64 buffer_sz:14;	/**< Data buffer size */
+			__le64 l2tag1:16;	/**< L2 Tag 1 value */
+		} debug __rte_packed;
+	};
+} __rte_packed;
 
 #define IAVF_TXD_QW1_DTYPE_SHIFT	0
 #define IAVF_TXD_QW1_DTYPE_MASK		(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
@@ -723,6 +741,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
@@ -734,7 +753,7 @@ enum iavf_tx_desc_dtype_value {
 #define IAVF_TXD_QW1_CMD_SHIFT	4
 #define IAVF_TXD_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_QW1_CMD_SHIFT)
 
-enum iavf_tx_desc_cmd_bits {
+enum iavf_tx_data_desc_cmd_bits {
 	IAVF_TX_DESC_CMD_EOP			= 0x0001,
 	IAVF_TX_DESC_CMD_RS			= 0x0002,
 	IAVF_TX_DESC_CMD_ICRC			= 0x0004,
@@ -778,18 +797,79 @@ enum iavf_tx_desc_length_fields {
 #define IAVF_TXD_QW1_L2TAG1_SHIFT	48
 #define IAVF_TXD_QW1_L2TAG1_MASK	(0xFFFFULL << IAVF_TXD_QW1_L2TAG1_SHIFT)
 
+#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
+#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
+#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
+#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
+	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
+	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT		(48)
+#define IAVF_TXD_DATA_QW1_L2TAG1_MASK		\
+	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
+
 /* Context descriptors */
 struct iavf_tx_context_desc {
+	union {
+		struct {
 	__le32 tunneling_params;
 	__le16 l2tag2;
 	__le16 rsvd;
 	__le64 type_cmd_tso_mss;
 };
-
-#define IAVF_TXD_CTX_QW1_DTYPE_SHIFT	0
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le32 tunneling;
+			__le16 l2tag2;
+			__le16 rsvd0;
+			__le64 type:4;
+			__le64 cmd:7;
+			__le64 ipsec:7;
+			__le64 rsvd1:12;
+			__le64 tlen_tsyn:18;
+			__le64 rsvd2:2;
+			__le64 mss_target_vsi:14;
+		} debug __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_TXD_CTX_QW1_DTYPE_SHIFT	(0)
 #define IAVF_TXD_CTX_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_CTX_QW1_DTYPE_SHIFT)
 
-#define IAVF_TXD_CTX_QW1_CMD_SHIFT	4
+#define IAVF_TXD_CTX_QW1_CMD_SHIFT	(4)
 #define IAVF_TXD_CTX_QW1_CMD_MASK	(0xFFFFUL << IAVF_TXD_CTX_QW1_CMD_SHIFT)
 
 enum iavf_tx_ctx_desc_cmd_bits {
@@ -804,6 +884,63 @@ enum iavf_tx_ctx_desc_cmd_bits {
 	IAVF_TX_CTX_DESC_SWPE		= 0x40
 };
 
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
+	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
+	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT		(30)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK		\
+	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK		\
+	(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT		(50)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK		\
+	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT		(0)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_external_ip_type {
+	IAVF_TX_CTX_DESC_EIPT_NONE,
+	IAVF_TX_CTX_DESC_EIPT_IPV6,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM			(32)
+#define IAVF_TXD_CTX_QW0_L2TAG2_MASK			(0xFFFFUL)
+
 struct iavf_nop_desc {
 	__le64 rsvd;
 	__le64 dtype_cmd;
@@ -911,6 +1048,68 @@ enum iavf_tx_ctx_desc_eipt_offload {
 #define IAVF_TXD_CTX_QW0_L4T_CS_SHIFT	23
 #define IAVF_TXD_CTX_QW0_L4T_CS_MASK	BIT_ULL(IAVF_TXD_CTX_QW0_L4T_CS_SHIFT)
 
+
+struct iavf_tx_ipsec_desc {
+	union {
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le16 l4payload_length;
+			__le32 esn;
+			__le16 trailer_length;
+			u8 type:4;
+			u8 rsv:1;
+			u8 udp:1;
+			u8 ivlen:2;
+			u8 next_header;
+			__le16 ipv6_ext_hdr_length;
+			__le32 said;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
+			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
+
+/* Initialization Vector Length type */
+enum iavf_ipsec_iv_len {
+	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
+	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
+	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
+	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
+};
+
 /* Statistics collected by each port, VSI, VEB, and S-channel */
 struct iavf_eth_stats {
 	u64 rx_bytes;			/* gorc */
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 1cf0866124..efb4cca197 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -38,6 +38,8 @@
  * value in current and future projects
  */
 
+#include "virtchnl_inline_ipsec.h"
+
 /* Error Codes */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
@@ -133,7 +135,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, and 37 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
 	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
 	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
@@ -226,6 +229,8 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 		return "VIRTCHNL_OP_DCF_CMD_DESC";
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
@@ -388,7 +393,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
-	/* BIT(8) is reserved */
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
@@ -2320,6 +2325,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..1e9134501e
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all valid, if certain field is invalid, set 1 for all bits */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* vf record the capability of crypto from the virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF pass virtchnl_ipsec_cap to PF
+ * and PF return capability of ipsec from virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF send this SA configuration to PF using virtchnl;
+ * PF create SA as configuration and PF driver will return
+ * an unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-reply window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF send configuration of index of SA to PF
+ * PF will update SA according to configuration
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF send configuration of index of SA to PF
+ * PF will destroy SA according to configuration
+ * flag bitmap indicate all SA or just selected SA will
+ * be destroyed
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF send this SA configuration to PF using virtchnl;
+ * PF read SA and will return configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v2 2/4] net/iavf: add iAVF IPsec inline crypto support
  2021-09-15 13:32 ` [dpdk-dev] [PATCH v2 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-09-15 13:32   ` [dpdk-dev] [PATCH v2 1/4] common/iavf: " Radu Nicolau
@ 2021-09-15 13:32   ` Radu Nicolau
  2021-09-18  5:28     ` Wu, Jingjing
  2021-09-15 13:32   ` [dpdk-dev] [PATCH v2 3/4] net/iavf: Add xstats support for inline IPsec crypto Radu Nicolau
  2021-09-15 13:32   ` [dpdk-dev] [PATCH v2 4/4] net/iavf: add watchdog for VFLR Radu Nicolau
  3 siblings, 1 reply; 128+ messages in thread
From: Radu Nicolau @ 2021-09-15 13:32 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev,
	Ray Kinsella
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as support for offloading
ESP over UDP, in conjunction with TSO for UDP and TCP flows.
Implement support for rte_security packet metadata.

Add definitions for the IPsec descriptors and extend the offload
support in the data and context descriptors accordingly.

Add support to the virtual channel mailbox for IPsec Crypto request
operations. IPsec Crypto requests receive an initial acknowledgement
of receipt from the physical function driver, followed by an
asynchronous response with the success/failure of the request,
including any response data.

Add enhanced descriptor debugging.

Refactor the scalar Tx burst function to support integration of the
offload.
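
As an illustration only (not part of the patch), a minimal sketch of
the application side of the packet metadata path: 'sess' is assumed to
be an inline crypto rte_security session already created against this
port's security context, and PKT_TX_SEC_OFFLOAD is the mbuf flag name
of this DPDK era (renamed RTE_MBUF_F_TX_SEC_OFFLOAD in later releases):

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

static int
prepare_inline_ipsec_mbuf(uint16_t port_id,
			  struct rte_security_session *sess,
			  struct rte_mbuf *m)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);

	if (ctx == NULL || sess == NULL)
		return -ENOTSUP;

	/* mark the packet for inline processing and attach the session */
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	return rte_security_set_pkt_metadata(ctx, sess, m, NULL);
}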

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h                       |   26 +
 drivers/net/iavf/iavf_ethdev.c                |   41 +-
 drivers/net/iavf/iavf_generic_flow.c          |   16 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1918 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |   96 +
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  729 +++++--
 drivers/net/iavf/iavf_rxtx.h                  |  579 ++++-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  166 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 14 files changed, 3660 insertions(+), 313 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index b3bd078111..934ef48278 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -189,6 +189,7 @@ struct iavf_info {
 	uint64_t supported_rxdid;
 	uint8_t *proto_xtr; /* proto xtr type for all queues */
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+	rte_atomic32_t pend_cmd_count;
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
@@ -216,6 +217,7 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+	struct iavf_parser_list ipsec_crypto_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
@@ -238,6 +240,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_IPV6_FLOW,
 	IAVF_PROTO_XTR_TCP,
 	IAVF_PROTO_XTR_IP_OFFSET,
+	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
 	IAVF_PROTO_XTR_MAX,
 };
 
@@ -249,11 +252,14 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 };
 
+struct iavf_security_ctx;
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
 	struct rte_eth_dev *eth_dev;
 	struct iavf_info vf;
+	struct iavf_security_ctx *security_ctx;
 
 	bool rx_bulk_alloc_allowed;
 	/* For vector PMD */
@@ -272,6 +278,8 @@ struct iavf_adapter {
 	(&((struct iavf_adapter *)adapter)->vf)
 #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct iavf_adapter *)adapter)->hw)
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+	(((struct iavf_adapter *)adapter)->security_ctx)
 
 /* IAVF_VSI_TO */
 #define IAVF_VSI_TO_HW(vsi) \
@@ -340,9 +348,24 @@ _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
+	rte_atomic32_set(&vf->pend_cmd_count, 1);
+
 	return !ret;
 }
 
+/* Check there is pending cmd in execution. If none, set new command.
+ * The async-response variant expects two messages back from the PF: the
+ * immediate ack of the request and the asynchronous response carrying the
+ * result, hence pend_cmd_count is set to 2.
+ */
+static inline int
+_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+	int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+	if (!ret)
+		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+	rte_atomic32_set(&vf->pend_cmd_count, 2);
+
+	return !ret;
+}
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
@@ -399,5 +422,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			uint16_t size);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 887b8b045b..8a562e0942 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -29,6 +29,7 @@
 #include "iavf_rxtx.h"
 #include "iavf_generic_flow.h"
 #include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
@@ -70,6 +71,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
 	[IAVF_PROTO_XTR_IP_OFFSET] = {
 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
 		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+		.param = {
+		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+		.ol_flag =
+			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
 };
 
 static int iavf_dev_configure(struct rte_eth_dev *dev);
@@ -922,6 +928,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	/* free the iAVF security device context and all related resources */
+	iavf_security_ctx_destroy(adapter);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -931,7 +940,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
@@ -974,6 +985,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
 
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+	}
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -1730,6 +1746,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
 		{ "tcp",       IAVF_PROTO_XTR_TCP       },
 		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
 	};
 	uint32_t i;
 
@@ -1738,8 +1755,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 			return xtr_type_map[i].type;
 	}
 
-	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
-		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+			"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
 
 	return -1;
 }
@@ -2332,6 +2349,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		return ret;
 	}
 
+	/** Check if the IPsec Crypto offload is supported and create
+	 *  security_ctx if it is.
+	 */
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		/* Initialize security_ctx only for the primary process */
+		ret = iavf_security_ctx_create(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+			return ret;
+		}
+
+		ret = iavf_security_init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
+			return ret;
+		}
+	}
+
 	iavf_default_rss_disable(adapter);
 
 	return 0;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index 1fe270fb22..d85e82a950 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1635,6 +1635,7 @@ iavf_flow_init(struct iavf_adapter *ad)
 	TAILQ_INIT(&vf->flow_list);
 	TAILQ_INIT(&vf->rss_parser_list);
 	TAILQ_INIT(&vf->dist_parser_list);
+	TAILQ_INIT(&vf->ipsec_crypto_parser_list);
 	rte_spinlock_init(&vf->flow_ops_lock);
 
 	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
@@ -1709,6 +1710,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
+		list = &vf->ipsec_crypto_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
@@ -2018,6 +2022,14 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 
 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
 				    actions, error);
+	if (*engine)
+		return 0;
+
+	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
+			pattern, actions, error);
+	if (*engine)
+		return 0;
+
 
 	if (!*engine) {
 		rte_flow_error_set(error, EINVAL,
@@ -2064,6 +2076,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	/* Special case for inline crypto egress flows */
+	if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY)
+		goto free_flow;
+
 	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
 			&engine, iavf_parse_engine_create, error);
 	if (ret < 0) {
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 4794d1fb80..a471c0331f 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -449,6 +449,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 /* engine types. */
 enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_NONE = 0,
+	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
 	IAVF_FLOW_ENGINE_MAX,
@@ -462,6 +463,7 @@ enum iavf_flow_engine_type {
  */
 enum iavf_flow_classification_stage {
 	IAVF_FLOW_STAGE_NONE = 0,
+	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
 	IAVF_FLOW_STAGE_RSS,
 	IAVF_FLOW_STAGE_DISTRIBUTOR,
 	IAVF_FLOW_STAGE_MAX,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
new file mode 100644
index 0000000000..3776fcf9d6
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -0,0 +1,1918 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+#include <rte_security_driver.h>
+#include <rte_security.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_log.h"
+#include "iavf_generic_flow.h"
+
+#include "iavf_ipsec_crypto.h"
+#include "iavf_ipsec_crypto_capabilities.h"
+
+/**
+ * iAVF IPsec Crypto Security Context
+ */
+struct iavf_security_ctx {
+	struct iavf_adapter *adapter;
+	int pkt_md_offset;
+	struct rte_cryptodev_capabilities *crypto_capabilities;
+};
+
+/**
+ * iAVF IPsec Crypto Security Session Parameters
+ */
+struct iavf_security_session {
+	struct iavf_adapter *adapter;
+
+	enum rte_security_ipsec_sa_mode mode;
+	enum rte_security_ipsec_tunnel_type type;
+	enum rte_security_ipsec_sa_direction direction;
+
+	struct {
+		uint32_t spi; /* Security Parameter Index */
+		uint32_t hw_idx; /* SA Index in hardware table */
+	} sa;
+
+	struct {
+		uint8_t enabled :1;
+		union {
+			uint64_t value;
+			struct {
+				uint32_t hi;
+				uint32_t low;
+			};
+		};
+	} esn;
+
+	struct {
+		uint8_t enabled :1;
+		uint16_t mss;
+	} tso;
+
+	struct {
+		uint8_t enabled :1;
+	} udp_encap;
+
+	size_t iv_sz;
+	size_t icv_sz;
+	size_t block_sz;
+
+	struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
+};
+/**
+ *  IV Length field in IPsec Tx Desc uses the following encoding:
+ *
+ *  0B - 0
+ *  4B - 1
+ *  8B - 2
+ *  16B - 3
+ *
+ * but we also need the IV length for TSO to correctly calculate the total
+ * header length, so the raw IV size is also placed in the upper 6 bits here
+ * for easier retrieval.
+ */
+static inline uint8_t
+calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
+{
+	uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
+
+	switch (iv_sz) {
+	case 4:
+		iv_length = IAVF_IPSEC_IV_LEN_DW;
+		break;
+	case 8:
+		iv_length = IAVF_IPSEC_IV_LEN_DDW;
+		break;
+	case 16:
+		iv_length = IAVF_IPSEC_IV_LEN_QDW;
+		break;
+	}
+
+	return (iv_sz << 2) | iv_length;
+}
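+
+/*
+ * Worked example of the encoding above (illustrative, assuming
+ * IAVF_IPSEC_IV_LEN_DDW and IAVF_IPSEC_IV_LEN_QDW are the 2 and 3 encodings
+ * from the table): an 8B IV gives (8 << 2) | 2 = 0x22 and a 16B IV gives
+ * (16 << 2) | 3 = 0x43, i.e. the raw IV size sits in the upper 6 bits and
+ * the descriptor encoding in the low 2 bits.
+ */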
+
+
+static unsigned int
+iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
+{
+	return sizeof(struct iavf_security_session);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_capability(struct iavf_security_ctx *iavf_sctx,
+	uint32_t algo, uint32_t type)
+{
+	const struct rte_cryptodev_capabilities *capability;
+	int i = 0;
+
+	capability = &iavf_sctx->crypto_capabilities[i];
+
+	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+			capability->sym.xform_type == type &&
+			capability->sym.cipher.algo == algo)
+			return &capability->sym;
+		/** try next capability */
+		capability = &iavf_sctx->crypto_capabilities[++i];
+	}
+
+	return NULL;
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_auth_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
+}
+static const struct rte_cryptodev_symmetric_capability *
+get_aead_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
+}
+
+static uint16_t
+get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->auth.block_size;
+}
+
+static uint8_t
+calc_context_desc_cipherblock_sz(size_t len)
+{
+	switch (len) {
+	case 8:
+		return 0x2;
+	case 16:
+		return 0x3;
+	default:
+		return 0x0;
+	}
+}
+
+static int
+valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
+{
+	if (len < min || len > max)
+		return 0;
+
+	if (increment == 0)
+		return 1;
+
+	if ((len - min) % increment)
+		return 0;
+
+	return 1;
+}
+
+static int
+valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_auth_xform *auth)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, auth->algo);
+	if (capability == NULL)
+		return 0;
+
+	/* verify key size */
+	if (!valid_length(auth->key.length,
+		capability->auth.key_size.min,
+		capability->auth.key_size.max,
+		capability->auth.key_size.increment))
+		return 0;
+
+	return 1;
+}
+
+static int
+valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_cipher_xform *cipher)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, cipher->algo);
+	if (capability == NULL)
+		return 0;
+
+	/* verify key size */
+	if (!valid_length(cipher->key.length,
+		capability->cipher.key_size.min,
+		capability->cipher.key_size.max,
+		capability->cipher.key_size.increment))
+		return 0;
+
+	return 1;
+}
+
+static int
+valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_aead_xform *aead)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, aead->algo);
+	if (capability == NULL)
+		return 0;
+
+	/* verify key size */
+	if (!valid_length(aead->key.length,
+		capability->aead.key_size.min,
+		capability->aead.key_size.max,
+		capability->aead.key_size.increment))
+		return 0;
+
+	return 1;
+}
+
+static int
+iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
+	struct rte_security_session_conf *conf)
+{
+	/** validate security action/protocol selection */
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
+		PMD_DRV_LOG(ERR, "Unsupported action / protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate IPsec protocol selection */
+	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		PMD_DRV_LOG(ERR, "Unsupported IPsec protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate selected options */
+	if (conf->ipsec.options.copy_dscp ||
+		conf->ipsec.options.copy_flabel ||
+		conf->ipsec.options.copy_df ||
+		conf->ipsec.options.dec_ttl ||
+		conf->ipsec.options.ecn ||
+		conf->ipsec.options.stats) {
+		PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
+		return -EINVAL;
+	}
+
+	/**
+	 * Validate crypto xforms parameters.
+	 *
+	 * AEAD transforms can be used for either inbound/outbound IPsec SAs,
+	 * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
+	 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
+	 */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
+			PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->cipher)) {
+			PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_auth_xform(iavf_sctx,
+				&conf->crypto_xform->next->auth)) {
+			PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
+			PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->next->cipher)) {
+			PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_aead_xform *aead, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AEAD;
+
+	switch (aead->algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		cfg->algo_type = VIRTCHNL_AES_CCM; break;
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		cfg->algo_type = VIRTCHNL_AES_GCM; break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
+	default:
+		RTE_ASSERT(!"we should not be here");
+	}
+
+	cfg->key_len = aead->key.length;
+	cfg->iv_len = aead->iv.length;
+	cfg->digest_len = aead->digest_length;
+	cfg->salt = salt;
+
+	RTE_ASSERT(cfg->key_len <= sizeof(cfg->key_data));
+
+	memcpy(cfg->key_data, aead->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_cipher_xform *cipher, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_CIPHER;
+
+	switch (cipher->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cfg->algo_type = VIRTCHNL_AES_CBC; break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cfg->algo_type = VIRTCHNL_3DES_CBC; break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cfg->algo_type = VIRTCHNL_AES_CTR;
+		cfg->salt = salt;
+		break;
+	default:
+		RTE_ASSERT(!"we should not be here");
+	}
+
+	cfg->key_len = cipher->key.length;
+	cfg->iv_len = cipher->iv.length;
+	cfg->salt = salt;
+
+	RTE_ASSERT(cfg->key_len <= sizeof(cfg->key_data));
+
+	memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
+}
+
+
+static void
+sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_auth_xform *auth, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AUTH;
+
+	switch (auth->algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+		cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		cfg->algo_type = VIRTCHNL_AES_CMAC; break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		cfg->algo_type = VIRTCHNL_AES_GMAC;
+		cfg->salt = salt;
+		break;
+	default:
+		RTE_ASSERT(!"we should not be here");
+	}
+
+	cfg->key_len = auth->key.length;
+	cfg->iv_len = auth->iv.length;
+	cfg->digest_len = auth->digest_length;
+
+	/* verify that the key length is within the bounds of the key_data array */
+	RTE_ASSERT(cfg->key_len <= sizeof(cfg->key_data));
+
+	memcpy(cfg->key_data, auth->key.data, cfg->key_len);
+}
+
+/**
+ * Send SA add virtual channel request to Inline IPsec driver.
+ *
+ * The Inline IPsec driver expects the SPI and destination IP address to be
+ * in host order, but the DPDK APIs use network order, therefore an htonl
+ * conversion of these parameters is needed.
+ */
+static uint32_t
+iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
+	struct rte_security_session_conf *conf)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	struct virtchnl_ipsec_sa_cfg *sa_cfg;
+	size_t request_len, response_len;
+
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg);
+
+	request = rte_malloc("iavf-sad-add-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
+	response = rte_malloc("iavf-sad-add-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set SA configuration params */
+	sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
+
+	sa_cfg->spi = htonl(conf->ipsec.spi);
+	sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
+	sa_cfg->virtchnl_direction =
+		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+			VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
+
+	if (conf->ipsec.options.esn) {
+		sa_cfg->esn_enabled = 1;
+		sa_cfg->esn_hi = conf->ipsec.esn.hi;
+		sa_cfg->esn_low = conf->ipsec.esn.low;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sa_cfg->udp_encap_enabled = 1;
+
+	/* Set outer IP params */
+	if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
+
+		*((uint32_t *)sa_cfg->dst_addr)	=
+			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
+	} else {
+		uint32_t *v6_dst_addr =
+			conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
+
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
+
+		((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
+		((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
+		((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
+		((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
+	}
+
+	/* set crypto params */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->aead, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->cipher, conf->ipsec.salt);
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->auth, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->auth, conf->ipsec.salt);
+		if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
+			sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->cipher, conf->ipsec.salt);
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sa_cfg_resp->sa_handle;
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static void
+set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
+	struct iavf_security_session *sess)
+{
+	template->sa_idx = sess->sa.hw_idx;
+
+	if (sess->udp_encap.enabled)
+		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
+
+	if (sess->esn.enabled)
+		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
+
+	template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
+	template->ctx_desc_ipsec_params =
+			calc_context_desc_cipherblock_sz(sess->block_sz) |
+			((uint8_t)(sess->icv_sz >> 2) << 3);
+}
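+
+/*
+ * Illustrative encoding of ctx_desc_ipsec_params above, assuming a 16B
+ * cipher block (encoded as 0x3 by calc_context_desc_cipherblock_sz()) and a
+ * 16B ICV: 0x3 | ((16 >> 2) << 3) = 0x23, i.e. the cipher block encoding in
+ * the low bits and the ICV length in 4B words above it.
+ */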
+
+static void
+set_session_parameter(struct iavf_security_ctx *iavf_sctx,
+	struct iavf_security_session *sess,
+	struct rte_security_session_conf *conf, uint32_t sa_idx)
+{
+	sess->adapter = iavf_sctx->adapter;
+
+	sess->mode = conf->ipsec.mode;
+	sess->direction = conf->ipsec.direction;
+
+	if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		sess->type = conf->ipsec.tunnel.type;
+
+	sess->sa.spi = conf->ipsec.spi;
+	sess->sa.hw_idx = sa_idx;
+
+	if (conf->ipsec.options.esn) {
+		sess->esn.enabled = 1;
+		sess->esn.value = conf->ipsec.esn.value;
+	}
+
+	if (conf->ipsec.options.tso) {
+		sess->tso.enabled = 1;
+		sess->tso.mss = conf->ipsec.mss;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sess->udp_encap.enabled = 1;
+
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sess->block_sz = get_aead_blocksize(iavf_sctx,
+			conf->crypto_xform->aead.algo);
+		sess->iv_sz = conf->crypto_xform->aead.iv.length;
+		sess->icv_sz = conf->crypto_xform->aead.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sess->block_sz = get_cipher_blocksize(iavf_sctx,
+			conf->crypto_xform->cipher.algo);
+		sess->iv_sz = conf->crypto_xform->cipher.iv.length;
+		sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			sess->block_sz = get_auth_blocksize(iavf_sctx,
+				RTE_CRYPTO_AUTH_AES_GMAC);
+			sess->iv_sz = conf->crypto_xform->auth.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		} else {
+			sess->block_sz = get_cipher_blocksize(iavf_sctx,
+				conf->crypto_xform->next->cipher.algo);
+			sess->iv_sz =
+				conf->crypto_xform->next->cipher.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		}
+	}
+
+	set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
+}
+
+/**
+ * Create IPsec Security Association for inline IPsec Crypto offload.
+ *
+ * 1. validate session configuration parameters
+ * 2. allocate session memory from mempool
+ * 3. add SA to hardware database
+ * 4. set session parameters
+ * 5. create packet metadata template for datapath
+ */
+static int
+iavf_ipsec_crypto_session_create(void *device,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *session,
+				 struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_session = NULL;
+	int sa_idx;
+	int ret = 0;
+
+	/* validate that all SA parameters are valid for device */
+	ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
+	if (ret)
+		return ret;
+
+	/* allocate session context */
+	if (rte_mempool_get(mempool, (void **)&iavf_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
+		return -ENOMEM;
+	}
+
+	/* add SA to hardware database */
+	sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
+	if (sa_idx < 0) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add SA (spi: %d, mode: %s, direction: %s)",
+			conf->ipsec.spi,
+			conf->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
+				"transport" : "tunnel",
+			conf->ipsec.direction ==
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+				"inbound" : "outbound");
+
+		rte_mempool_put(mempool, iavf_session);
+		return -EFAULT;
+	}
+
+	/* save data plane required session parameters */
+	set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
+
+	/* save to security session private data */
+	set_sec_session_private_data(session, iavf_session);
+
+	return 0;
+}
+
+/**
+ * Check if valid ipsec crypto action.
+ * SPI must be non-zero and SPI in session must match SPI value
+ * passed into function.
+ *
+ * returns: 0 if invalid session or SPI value equal zero
+ * returns: 1 if valid
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_session *sess = session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(sess == NULL || sess->adapter != adapter))
+		return 0;
+
+	/* SPI value must be non-zero */
+	if (spi == 0)
+		return 0;
+	/* Session SPI must match the flow SPI */
+	else if (sess->sa.spi == spi) {
+		return 1;
+		/**
+		 * TODO: We should add a way of tracking valid hw SA indices to
+		 * make validation less brittle
+		 */
+	}
+
+	return 0;
+}
+
+
+/**
+ * Send virtual channel security policy add request to IES driver.
+ *
+ * The IES driver expects the SPI and destination IP address to be in host
+ * order, but the DPDK APIs use network order, therefore an htonl
+ * conversion of these parameters is needed.
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg);
+	request = rte_malloc("iavf-inbound-security-policy-add-request",
+				request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* ESP SPI */
+	request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
+
+	/* Destination IP  */
+	if (is_v4) {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
+		request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
+	} else {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+		request->ipsec_data.sp_cfg->dip[0] =
+				htonl(((uint32_t *)v6_dst_addr)[0]);
+		request->ipsec_data.sp_cfg->dip[1] =
+				htonl(((uint32_t *)v6_dst_addr)[1]);
+		request->ipsec_data.sp_cfg->dip[2] =
+				htonl(((uint32_t *)v6_dst_addr)[2]);
+		request->ipsec_data.sp_cfg->dip[3] =
+				htonl(((uint32_t *)v6_dst_addr)[3]);
+	}
+
+	request->ipsec_data.sp_cfg->drop = drop;
+
+	/** Traffic Class/Congestion Domain are currently not supported */
+	request->ipsec_data.sp_cfg->set_tc = 0;
+	request->ipsec_data.sp_cfg->cgd = 0;
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
+	response = rte_malloc("iavf-inbound-security-policy-add-response",
+				response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* send virtual channel request to add the security policy to hardware */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sp_cfg_resp->rule_id;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_update);
+	request = rte_malloc("iavf-sa-update-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sa-update-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set request params */
+	request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
+	request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
+
+	/* send virtual channel request to update the SA ESN in hardware */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_resp->resp;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_update(void *device,
+		struct rte_security_session *session,
+		struct rte_security_session_conf *conf)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int rc = 0;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* update esn hi 32-bits */
+	if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
+		/**
+		 * Update ESN in hardware for inbound SA. Store in
+		 * iavf_security_session for outbound SA for use
+		 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
+		 */
+		if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+			rc = iavf_ipsec_crypto_sa_update_esn(adapter,
+					iavf_sess);
+		else
+			iavf_sess->esn.hi = conf->ipsec.esn.hi;
+	}
+
+	/* update TSO MSS size */
+	if (iavf_sess->tso.enabled && conf->ipsec.options.tso)
+		iavf_sess->tso.mss = conf->ipsec.mss;
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
+		struct rte_security_session *session __rte_unused,
+		struct rte_security_stats *stats __rte_unused)
+{
+	return -EOPNOTSUPP;
+}
+
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_destroy);
+	request = rte_malloc("iavf-sp-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sp-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set security policy params */
+	request->ipsec_data.sp_destroy->table_id = is_v4 ?
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+	request->ipsec_data.sp_destroy->rule_id = flow_id;
+
+	/* send virtual channel request to delete the security policy */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		return response->ipsec_data.ipsec_status->status;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_destroy);
+
+	request = rte_malloc("iavf-sa-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+
+	response = rte_malloc("iavf-sa-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/**
+	 * SA delete supports deletion of 1-8 specified SAs; if the flag
+	 * field is zero, all SAs associated with the VF will be deleted.
+	 */
+	if (sess) {
+		request->ipsec_data.sa_destroy->flag = 0x1;
+		request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
+	} else {
+		request->ipsec_data.sa_destroy->flag = 0x0;
+	}
+
+	/* send virtual channel request to delete the SA from hardware */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+
+	/**
+	 * The delete status will be the same bitmask as the sa_destroy
+	 * request flag if the delete was successful.
+	 */
+	if (request->ipsec_data.sa_destroy->flag !=
+			response->ipsec_data.ipsec_status->status)
+		rc = -EFAULT;
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+
+static int
+iavf_ipsec_crypto_session_destroy(void *device,
+		struct rte_security_session *session)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int ret;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
+	rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
+	return ret;
+}
+
+/**
+ * Get ESP trailer from packet as well as calculate the total ESP trailer
+ * length, which includes the padding, the ESP trailer footer and the ICV
+ */
+static inline struct rte_esp_tail *
+iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t *esp_trailer_length)
+{
+	struct rte_esp_tail *esp_trailer;
+
+	uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
+	uint16_t offset = 0;
+
+	/**
+	 * The ICV will not be present in TSO packets as this is appended by
+	 * hardware during segment generation
+	 */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		length -=  s->icv_sz;
+
+	*esp_trailer_length = length;
+
+	/**
+	 * Calculate the offset in the packet to the ESP trailer header; this
+	 * is the total packet length less the size of the ESP trailer (plus
+	 * the ICV length, if present)
+	 */
+	offset = rte_pktmbuf_pkt_len(m) - length;
+
+	if (m->nb_segs > 1) {
+		/* find segment which esp trailer is located */
+		while (m->data_len < offset) {
+			offset -= m->data_len;
+			m = m->next;
+		}
+	}
+
+	esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
+
+	*esp_trailer_length += esp_trailer->pad_len;
+
+	return esp_trailer;
+}
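+
+/*
+ * Illustrative example of the trailer length computed above, assuming an
+ * AES-GCM SA with a 16B ICV and a pad_len of 2: for a non-TSO packet the
+ * total is sizeof(struct rte_esp_tail) (2B) + 16B ICV + 2B padding = 20B;
+ * for a TSO packet the ICV is excluded, giving 2B + 2B = 4B, since hardware
+ * appends the ICV per generated segment.
+ */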
+
+
+static inline uint16_t
+iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t esp_tlen)
+{
+	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
+	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
+	uint16_t ol4_len = 0;		/* UDP NATT */
+	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
+	uint16_t l4_len = 0;		/* TCP/UDP/SCTP hdrs */
+	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
+
+	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ol3_len = m->outer_l3_len;
+		/**<
+		 * application provided l3len assumed to include length of
+		 * ipv4/6 hdr + ext hdrs
+		 */
+
+	if (s->udp_encap.enabled)
+		ol4_len = sizeof(struct rte_udp_hdr);
+
+	l3_len = m->l3_len;
+	l4_len = m->l4_len;
+
+	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
+			esp_hlen + l3_len + l4_len + esp_tlen);
+}
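+
+/*
+ * Worked example for the computation above (illustrative numbers only):
+ * a transport mode ESP/IPv4 TCP packet with no VLAN and no NAT-T has
+ * ol2_len = 14, ol3_len = 0, ol4_len = 0, esp_hlen = 8 + iv_sz, l3_len = 20
+ * and l4_len = 20, so with an 8B IV and a 24B ESP trailer a 1094B packet
+ * yields a 1000B L4 payload (1094 - 14 - 16 - 20 - 20 - 24 = 1000).
+ */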
+
+
+static int
+iavf_ipsec_crypto_pkt_metadata_set(void *device,
+			 struct rte_security_session *session,
+			 struct rte_mbuf *m, void *params)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_sess = session->sess_private_data;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
+	struct rte_esp_tail *esp_tail;
+	uint64_t *sqn = params;
+	uint16_t esp_trailer_length;
+
+	/* Check we have a valid session associated with this device */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* Get dynamic metadata location from mbuf */
+	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
+		struct iavf_ipsec_crypto_pkt_metadata *);
+
+	/* Set immutable metadata values from the session template */
+	memcpy(md, &iavf_sess->pkt_metadata_template,
+		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
+
+	esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
+			&esp_trailer_length);
+
+	/* Set per packet mutable metadata values */
+	md->esp_trailer_len = esp_trailer_length;
+	md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
+				iavf_sess, esp_trailer_length);
+	md->next_proto = esp_tail->next_proto;
+
+	/* If Extended SN is in use, set the upper 32 bits in the metadata */
+	if (iavf_sess->esn.enabled && sqn != NULL)
+		md->esn = (uint32_t)(*sqn >> 32);
+
+	return 0;
+}
+
+static int
+iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
+		struct virtchnl_ipsec_cap *capability)
+{
+	/* Perform pf-vf comms */
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg);
+
+	request = rte_malloc("iavf-device-capability-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_cap);
+	response = rte_malloc("iavf-device-capability-response",
+			response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* send virtual channel request to query IPsec Crypto capabilities */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id){
+		rc = -EFAULT;
+		goto update_cleanup;
+	}
+	memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+
+enum rte_crypto_auth_algorithm auth_maptbl[] = {
+	/* Hash Algorithm */
+	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
+	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
+	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
+	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
+	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
+	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
+	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
+	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
+	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
+	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+};
+
+static void
+update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
+		struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	capability->auth.algo = auth_maptbl[acap->algo_type];
+	capability->auth.block_size = acap->block_size;
+
+	capability->auth.key_size.min = acap->min_key_size;
+	capability->auth.key_size.max = acap->max_key_size;
+	capability->auth.key_size.increment = acap->inc_key_size;
+
+	capability->auth.digest_size.min = acap->min_digest_size;
+	capability->auth.digest_size.max = acap->max_digest_size;
+	capability->auth.digest_size.increment = acap->inc_digest_size;
+}
+
+enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
+	/* Cipher Algorithm */
+	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
+	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
+	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
+	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
+};
+
+
+static void
+update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+	capability->cipher.algo = cipher_maptbl[acap->algo_type];
+
+	capability->cipher.block_size = acap->block_size;
+
+	capability->cipher.key_size.min = acap->min_key_size;
+	capability->cipher.key_size.max = acap->max_key_size;
+	capability->cipher.key_size.increment = acap->inc_key_size;
+
+	capability->cipher.iv_size.min = acap->min_iv_size;
+	capability->cipher.iv_size.max = acap->max_iv_size;
+	capability->cipher.iv_size.increment = acap->inc_iv_size;
+}
+
+enum rte_crypto_aead_algorithm aead_maptbl[] = {
+	/* AEAD Algorithm */
+	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
+	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
+	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+};
+
+static void
+update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	capability->aead.algo = aead_maptbl[acap->algo_type];
+
+	capability->aead.block_size = acap->block_size;
+
+	capability->aead.key_size.min = acap->min_key_size;
+	capability->aead.key_size.max = acap->max_key_size;
+	capability->aead.key_size.increment = acap->inc_key_size;
+
+	capability->aead.aad_size.min = acap->min_aad_size;
+	capability->aead.aad_size.max = acap->max_aad_size;
+	capability->aead.aad_size.increment = acap->inc_aad_size;
+
+	capability->aead.iv_size.min = acap->min_iv_size;
+	capability->aead.iv_size.max = acap->max_iv_size;
+	capability->aead.iv_size.increment = acap->inc_iv_size;
+
+	capability->aead.digest_size.min = acap->min_digest_size;
+	capability->aead.digest_size.max = acap->max_digest_size;
+	capability->aead.digest_size.increment = acap->inc_digest_size;
+}
+
+
+/**
+ * Dynamically set crypto capabilities based on the virtchnl IPsec
+ * capabilities structure.
+ */
+int
+iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
+{
+	struct rte_cryptodev_capabilities *capabilities;
+	int i, j, number_of_capabilities = 0, ci = 0;
+
+	/* Count the total number of crypto algorithms supported */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
+		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
+
+	/**
+	 * Allocate cryptodev capabilities structure for
+	 * *number_of_capabilities* items plus one item to null terminate the
+	 * array
+	 */
+	capabilities = rte_zmalloc("crypto_cap",
+		sizeof(struct rte_cryptodev_capabilities) *
+		(number_of_capabilities + 1), 0);
+	if (capabilities == NULL)
+		return -ENOMEM;
+
+	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
+
+	/**
+	 * Iterate over each virtchnl crypto capability by crypto type and
+	 * algorithm.
+	 */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
+		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
+			switch (vch_cap->cap[i].crypto_type) {
+			case VIRTCHNL_AUTH:
+				update_auth_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_CIPHER:
+				update_cipher_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_AEAD:
+				update_aead_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			default:
+				capabilities[ci].op =
+						RTE_CRYPTO_OP_TYPE_UNDEFINED;
+				break;
+			}
+		}
+	}
+
+	iavf_sctx->crypto_capabilities = capabilities;
+	return 0;
+}
+
+/**
+ * Get security capabilities for device
+ */
+static const struct rte_security_capability *
+iavf_ipsec_crypto_capabilities_get(void *device)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	unsigned int i;
+
+	static struct rte_security_capability iavf_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .tso = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .tso = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .tso = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .tso = 1,
+						.stats = 1, .esn = 1 }
+			},
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	/**
+	 * Update the security capabilities struct with the runtime discovered
+	 * crypto capabilities, except for the last element of the array, which
+	 * is the null termination.
+	 */
+	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
+			sizeof(iavf_security_capabilities[0])) - 1); i++) {
+		iavf_security_capabilities[i].crypto_capabilities =
+			iavf_sctx->crypto_capabilities;
+	}
+
+	return iavf_security_capabilities;
+}
+
+static struct rte_security_ops iavf_ipsec_crypto_ops = {
+	.session_get_size		= iavf_ipsec_crypto_session_size_get,
+	.session_create			= iavf_ipsec_crypto_session_create,
+	.session_update			= iavf_ipsec_crypto_session_update,
+	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
+	.session_destroy		= iavf_ipsec_crypto_session_destroy,
+	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
+	.get_userdata			= NULL,
+	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
+};
+
+int
+iavf_security_ctx_create(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx;
+
+	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
+	if (sctx == NULL)
+		return -ENOMEM;
+
+	sctx->device = adapter->eth_dev;
+	sctx->ops = &iavf_ipsec_crypto_ops;
+	sctx->sess_cnt = 0;
+
+	adapter->eth_dev->security_ctx = sctx;
+
+	if (adapter->security_ctx == NULL) {
+		adapter->security_ctx = rte_malloc("iavf_security_ctx",
+				sizeof(struct iavf_security_ctx), 0);
+		if (adapter->security_ctx == NULL)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int
+iavf_security_init(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct rte_mbuf_dynfield pkt_md_dynfield = {
+		.name = "iavf_ipsec_crypto_pkt_metadata",
+		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
+		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
+	};
+	struct virtchnl_ipsec_cap capabilities;
+	int rc;
+
+	iavf_sctx->adapter = adapter;
+
+	iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
+	if (iavf_sctx->pkt_md_offset < 0)
+		return iavf_sctx->pkt_md_offset;
+
+	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
+	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
+	if (rc)
+		return rc;
+
+	return	iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
+			&capabilities);
+}
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	return iavf_sctx->pkt_md_offset;
+}
+
+int
+iavf_security_ctx_destroy(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx  = adapter->eth_dev->security_ctx;
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	if (iavf_sctx == NULL)
+		return -ENODEV;
+
+	/* TODO: Add resources cleanup */
+
+	/* free and reset security data structures */
+	rte_free(iavf_sctx);
+	rte_free(sctx);
+
+	adapter->security_ctx = NULL;
+	adapter->eth_dev->security_ctx = NULL;
+
+	return 0;
+}
+
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
+{
+	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
+
+	/** Capability check for IPsec Crypto */
+	if (resources && (resources->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
+		return 1;
+
+	return 0;
+}
+
+
+#define IAVF_IPSEC_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
+enum iavf_ipsec_flow_pt_type {
+	IAVF_PATTERN_ESP = 1,
+	IAVF_PATTERN_AH,
+	IAVF_PATTERN_UDP_ESP,
+};
+enum iavf_ipsec_flow_pt_ip_ver {
+	IAVF_PATTERN_IPV4 = 1,
+	IAVF_PATTERN_IPV6,
+};
+
+#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
+#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
+#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
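+
+/*
+ * For example (values per the enums above), the IPv6 NAT-T entry below is
+ * registered with IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6),
+ * i.e. (3 | (2 << 4)) = 0x23, from which IAVF_PATTERN_TYPE() recovers 3 and
+ * IAVF_PATTERN_IP_V() recovers 2 at parse time.
+ */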
+
+static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
+	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
+};
+
+struct iavf_ipsec_flow_item {
+	uint64_t id;
+	uint8_t is_ipv4;
+	uint32_t spi;
+	struct rte_ether_hdr eth_hdr;
+	union {
+		struct rte_ipv4_hdr ipv4_hdr;
+		struct rte_ipv6_hdr ipv6_hdr;
+	};
+	struct rte_udp_hdr udp_hdr;
+};
+
+static void
+parse_eth_item(const struct rte_flow_item_eth *item,
+		struct rte_ether_hdr *eth)
+{
+	memcpy(eth->s_addr.addr_bytes,
+			item->src.addr_bytes, sizeof(eth->s_addr));
+	memcpy(eth->d_addr.addr_bytes,
+			item->dst.addr_bytes, sizeof(eth->d_addr));
+}
+
+static void
+parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
+		struct rte_ipv4_hdr *ipv4)
+{
+	ipv4->src_addr = item->hdr.src_addr;
+	ipv4->dst_addr = item->hdr.dst_addr;
+}
+
+static void
+parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
+		struct rte_ipv6_hdr *ipv6)
+{
+	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
+	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+}
+
+static void
+parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
+{
+	udp->dst_port = item->hdr.dst_port;
+	udp->src_port = item->hdr.src_port;
+}
+
+static int
+has_security_action(const struct rte_flow_action actions[],
+	const void **session)
+{
+	/* only {SECURITY; END} supported */
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY
+			&& actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
+		*session = actions[0].conf;
+		return 1;
+	}
+	return 0;
+}
+
+
+static struct iavf_ipsec_flow_item *
+iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t type)
+{
+	const void *session;
+	struct iavf_ipsec_flow_item
+		*ipsec_flow = rte_malloc("security-flow-rule",
+		sizeof(struct iavf_ipsec_flow_item), 0);
+	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
+	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
+
+	if (ipsec_flow == NULL)
+		return NULL;
+
+	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
+
+	if (pattern[0].spec)
+		parse_eth_item((const struct rte_flow_item_eth *)
+				pattern[0].spec, &ipsec_flow->eth_hdr);
+
+	switch (p_type) {
+	case IAVF_PATTERN_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[2].spec)->hdr.spi;
+		break;
+	case IAVF_PATTERN_AH:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_ah *)
+					pattern[2].spec)->spi;
+		break;
+	case IAVF_PATTERN_UDP_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		parse_udp_item((const struct rte_flow_item_udp *)
+				pattern[2].spec,
+			&ipsec_flow->udp_hdr);
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[3].spec)->hdr.spi;
+		break;
+	default:
+		goto flow_cleanup;
+	}
+
+
+	if (!has_security_action(actions, &session))
+		goto flow_cleanup;
+
+	if (!iavf_ipsec_crypto_action_valid(ethdev, session,
+			ipsec_flow->spi))
+		goto flow_cleanup;
+
+	return ipsec_flow;
+
+flow_cleanup:
+	rte_free(ipsec_flow);
+	return NULL;
+}
+
+
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser;
+
+static int
+iavf_ipsec_flow_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
+		parser = &iavf_ipsec_flow_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
+}
+
+static int
+iavf_ipsec_flow_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = meta;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	if (ipsec_flow->is_ipv4) {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			1,
+			ipsec_flow->ipv4_hdr.dst_addr,
+			NULL,
+			0);
+	} else {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			0,
+			0,
+			ipsec_flow->ipv6_hdr.dst_addr,
+			0);
+	}
+
+	if (ipsec_flow->id < 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Failed to add SA.");
+		return -rte_errno;
+	}
+
+	flow->rule = ipsec_flow;
+
+	return 0;
+}
+
+static int
+iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	iavf_ipsec_crypto_security_policy_delete(ad,
+			ipsec_flow->is_ipv4, ipsec_flow->id);
+	rte_free(ipsec_flow);
+	return 0;
+}
+
+static struct iavf_flow_engine iavf_ipsec_flow_engine = {
+	.init = iavf_ipsec_flow_init,
+	.uninit = iavf_ipsec_flow_uninit,
+	.create = iavf_ipsec_flow_create,
+	.destroy = iavf_ipsec_flow_destroy,
+	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
+};
+
+static int
+iavf_ipsec_flow_parse(struct iavf_adapter *ad,
+		       struct iavf_pattern_match_item *array,
+		       uint32_t array_len,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta,
+		       struct rte_flow_error *error)
+{
+	struct iavf_pattern_match_item *item = NULL;
+	int ret = -1;
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (item && item->meta) {
+		uint32_t type = (uint64_t)(item->meta);
+		struct iavf_ipsec_flow_item *fi =
+				iavf_ipsec_flow_item_parse(ad->eth_dev,
+						pattern, actions, type);
+		if (fi && meta) {
+			*meta = fi;
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser = {
+	.engine = &iavf_ipsec_flow_engine,
+	.array = iavf_ipsec_flow_pattern,
+	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
+	.parse_pattern_action = iavf_ipsec_flow_parse,
+	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
+};
+
+RTE_INIT(iavf_ipsec_flow_engine_register)
+{
+	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
+}
+
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
new file mode 100644
index 0000000000..d8d7d6649e
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_H_
+#define _IAVF_IPSEC_CRYPTO_H_
+
+#include <rte_security.h>
+
+#include "iavf.h"
+
+/* IPsec Crypto Packet Metadata offload flags */
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
+
+/**
+ * Packet metadata structure holding the parameters required by the iAVF
+ * transmit data path. The parameters are set per session by calling the
+ * rte_security_set_pkt_metadata() API.
+ */
+struct iavf_ipsec_crypto_pkt_metadata {
+	uint32_t sa_idx;                /* SA hardware index (20b/4B) */
+
+	uint8_t ol_flags;		/* flags (1B) */
+	uint8_t len_iv;			/* IV length (2b/1B) */
+	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
+	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
+
+	uint16_t l4_payload_len;	/* L4 payload length */
+	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extension headers len (5b/1B) */
+	uint8_t next_proto;		/* Next Protocol (8b/1B) */
+
+	uint32_t esn;		        /* Extended Sequence Number (32b/4B) */
+} __rte_packed;
+
+/**
+ * Check whether inline IPsec Crypto offload is supported
+ */
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
+
+/**
+ * Create security context
+ */
+int iavf_security_ctx_create(struct iavf_adapter *adapter);
+
+/**
+ * Initialize security context
+ */
+int iavf_security_init(struct iavf_adapter *adapter);
+
+/**
+ * Set security capabilities
+ */
+int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchl_capabilities);
+
+
+int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+/**
+ * Destroy security context
+ */
+int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
+
+/**
+ * Verify that the inline IPsec Crypto action is valid for this device
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi);
+
+/**
+ * Add inbound security policy rule to hardware
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop);
+
+/**
+ * Delete inbound security policy rule from hardware
+ */
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id);
+
+#endif /* _IAVF_IPSEC_CRYPTO_H_ */
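
Note on usage: the metadata structure above is not filled directly by
applications; it is populated by the PMD when the generic rte_security
per-packet API is called. A minimal application-side sketch (illustrative
only; the helper name and the port/session/mbuf variables are assumptions,
not part of this patch):

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

/* Illustrative sketch, not part of this patch: attach the per-packet IPsec
 * metadata consumed by the iAVF Tx path. */
static int
app_prepare_inline_ipsec_pkt(uint16_t port_id,
		struct rte_security_session *sess, struct rte_mbuf *m)
{
	struct rte_security_ctx *sctx = rte_eth_dev_get_sec_ctx(port_id);

	if (sctx == NULL)
		return -ENOTSUP;

	/* request inline IPsec processing for this packet */
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;

	/* the PMD copies the session parameters into its private mbuf
	 * dynfield (struct iavf_ipsec_crypto_pkt_metadata) here
	 */
	return rte_security_set_pkt_metadata(sctx, sess, m, NULL);
}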
diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
new file mode 100644
index 0000000000..70ce8dd638
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC MAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 },
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* ChaCha20-Poly1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 16,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	}
+};
+
+
+#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
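
The capability table above is exposed to applications through the port's
rte_security context (security capability entries reference per-algorithm
arrays like this one via their crypto_capabilities field). A hedged sketch
of how an application might check for inline ESP support before creating a
session (the helper name is illustrative, not part of this patch):

#include <rte_ethdev.h>
#include <rte_security.h>

/* Illustrative sketch, not part of this patch: walk the security capability
 * list (terminated by RTE_SECURITY_ACTION_TYPE_NONE) looking for inline
 * crypto ESP support. */
static int
app_port_supports_inline_esp(uint16_t port_id)
{
	struct rte_security_ctx *sctx = rte_eth_dev_get_sec_ctx(port_id);
	const struct rte_security_capability *cap;

	if (sctx == NULL)
		return 0;

	for (cap = rte_security_capabilities_get(sctx);
	     cap != NULL && cap->action != RTE_SECURITY_ACTION_TYPE_NONE;
	     cap++) {
		if (cap->action == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
		    cap->protocol == RTE_SECURITY_PROTOCOL_IPSEC &&
		    cap->ipsec.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
			return 1;
	}

	return 0;
}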
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index e33fe4576b..a31cffc193 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -27,6 +27,7 @@
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
+#include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
@@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 uint8_t
 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
@@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
 		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
 		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
 		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
+				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
 	};
 
 	return flex_type < RTE_DIM(rxdid_map) ?
@@ -500,6 +504,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields =
 			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
+	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+		rxq->xtr_ol_flag =
+			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+		rxq->rxd_to_pkt_fields =
+			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
 	case IAVF_RXDID_COMMS_OVS_1:
 		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
 		break;
@@ -684,6 +694,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		       const struct rte_eth_txconf *tx_conf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
@@ -728,9 +740,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		struct virtchnl_vlan_supported_caps *insertion_support =
-			&vf->vlan_v2_caps.offloads.insertion_support;
+			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
 		uint32_t insertion_cap;
 
 		if (insertion_support->outer)
@@ -754,6 +766,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
+	if (iavf_ipsec_crypto_supported(adapter))
+		txq->ipsec_crypto_pkt_md_offset =
+			iavf_security_get_pkt_md_offset(adapter);
+
 	/* Allocate software ring */
 	txq->sw_ring =
 		rte_zmalloc_socket("iavf tx sw ring",
@@ -1044,29 +1060,97 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 
 static inline void
 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
-			  volatile union iavf_rx_flex_desc *rxdp,
-			  uint8_t rx_flags)
+			  volatile union iavf_rx_flex_desc *rxdp)
 {
-	uint16_t vlan_tci = 0;
-
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
-	    rte_le_to_cpu_64(rxdp->wb.status_error0) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+		(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+		mb->vlan_tci =
+			rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	} else {
+		mb->vlan_tci = 0;
+	}
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
-	    rte_le_to_cpu_16(rxdp->wb.status_error1) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
+		mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+				PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+		mb->vlan_tci_outer = mb->vlan_tci;
+		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
+	} else {
+		mb->vlan_tci_outer = 0;
+	}
 #endif
+}
 
-	if (vlan_tci) {
-		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		mb->vlan_tci = vlan_tci;
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
+
+	mb->dynfield1[0] = desc->ipsec_said &
+			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
 	}
+
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp,
+			  struct iavf_ipsec_crypto_stats *stats)
+{
+	uint16_t status1 = rte_le_to_cpu_64(rxdp->wb.status_error1);
+
+	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
+		uint16_t ipsec_status;
+
+		mb->ol_flags |= PKT_RX_SEC_OFFLOAD;
+
+		ipsec_status = status1 &
+			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
+
+
+		if (unlikely(ipsec_status !=
+			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
+			mb->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+
+			switch (ipsec_status) {
+			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
+				stats->ierrors.sad_miss++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
+				stats->ierrors.not_processed++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
+				stats->ierrors.icv_check++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
+				stats->ierrors.ipsec_length++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
+				stats->ierrors.misc++;
+				break;
 }
 
+			stats->ierrors.count++;
+			return;
+		}
+
+		stats->icount++;
+		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
+
+		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
+			ipsec_status !=
+				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
+			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
+	}
+}
+
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
@@ -1384,7 +1468,9 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->ol_flags = 0;
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1526,7 +1612,9 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = 0;
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1764,7 +1852,9 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
-			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
+			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+				&rxq->stats.ipsec_crypto);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2034,7 +2124,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
 
 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-	if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
+	if ((txd[desc_to_clean_to].qw1 &
 			rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
 			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
 		PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
@@ -2050,7 +2140,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
 					last_desc_cleaned);
 
-	txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
+	txd[desc_to_clean_to].qw1 = 0;
 
 	txq->last_desc_cleaned = desc_to_clean_to;
 	txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
@@ -2058,190 +2148,363 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 	return 0;
 }
 
-/* Check if the context descriptor is needed for TX offloading */
+
+
+static inline void
+iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
+{
+	uint64_t cmd = 0;
+
+	/* TSO enabled */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;
+
+	/* Time Sync - Currently not supported */
+
+	/* Outer L2 TAG 2 Insertion - Currently not supported */
+	/* Inner L2 TAG 2 Insertion - Currently not supported */
+
+	*field |= cmd;
+}
+
+static inline void
+iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+{
+	uint64_t ipsec_field =
+		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+
+	*field |= ipsec_field;
+}
+
+
+static inline void
+iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
+		const struct rte_mbuf *m)
+{
+	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
+	uint64_t eip_len = 0;
+	uint64_t eip_noinc = 0;
+	/* Default - IP_ID is increment in each segment of LSO */
+
+	switch (m->ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6 |
+			PKT_TX_OUTER_IP_CKSUM)) {
+	case PKT_TX_OUTER_IPV4:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV6:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	}
+
+	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
+		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
+		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
+}
+
 static inline uint16_t
-iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
+iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
-	if (flags & PKT_TX_TCP_SEG)
-		return 1;
-	if (flags & PKT_TX_VLAN_PKT &&
-	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		return 1;
-	return 0;
+	uint64_t segmentation_field = 0;
+	uint64_t total_length = 0;
+
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD) {
+		total_length = ipsec_md->l4_payload_len;
+	} else {
+		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			total_length -= m->outer_l3_len;
+	}
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+	if (!m->l4_len || !m->tso_segsz)
+		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
+			 m->l4_len, m->tso_segsz);
+	if (m->tso_segsz < IAVF_MIN_TSO_MSS)
+		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
+			m->tso_segsz, IAVF_MIN_TSO_MSS);
+#endif
+	segmentation_field =
+		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
+				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
+		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
+				IAVF_TXD_CTX_QW1_MSS_MASK);
+
+	*field |= segmentation_field;
+
+	return total_length;
+}
+
+static inline void
+iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
+	uint16_t *tlen)
+{
+	/* fill descriptor type field */
+	desc->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
+
+	/* fill command field */
+	iavf_fill_ctx_desc_cmd_field(&desc->qw1, m);
+
+	/* fill segmentation field */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		/* fill IPsec field */
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			iavf_fill_ctx_desc_ipsec_field(&desc->qw1, ipsec_md);
+
+		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc->qw1,
+				m, ipsec_md);
+	}
+
+	/* fill tunnelling field */
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field(&desc->qw0, m);
+	else
+		desc->qw0 = 0;
+
+	desc->qw0 = rte_cpu_to_le_64(desc->qw0);
+	desc->qw1 = rte_cpu_to_le_64(desc->qw1);
 }
 
+
 static inline void
-iavf_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union iavf_tx_offload tx_offload)
+iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
+	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
 {
+	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
+		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
+		((uint64_t)md->esp_trailer_len <<
+				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
+
+	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
+		((uint64_t)md->next_proto <<
+				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
+		((uint64_t)(md->len_iv & 0x3) <<
+				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
+		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+				1ULL : 0ULL) <<
+				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
+		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+
+	/**
+	 * TODO: Pre-calculate this in the Session initialization
+	 *
+	 * Calculate the IPsec header overhead required by the data descriptor
+	 * when TSO offload is enabled.
+	 */
+	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
+			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+			sizeof(struct rte_udp_hdr) : 0);
+}
+
+static inline void
+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
+		struct rte_mbuf *m)
+{
+	uint64_t command = 0;
+	uint64_t offset = 0;
+	uint64_t l2tag1 = 0;
+
+	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
+
+	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
+
+	/* Descriptor based VLAN insertion */
+	if (m->ol_flags & PKT_TX_VLAN_PKT) {
+		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
+		l2tag1 |= m->vlan_tci;
+	}
+
 	/* Set MACLEN */
-	*td_offset |= (tx_offload.l2_len >> 1) <<
-		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & PKT_TX_IP_CKSUM) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV4) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV6) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	}
-
-	if (ol_flags & PKT_TX_TCP_SEG) {
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		return;
+	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L3 checksum offloading inner */
+	if (m->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV4) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV6) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
 	}
 
 	/* Enable L4 checksum offloads */
-	switch (ol_flags & PKT_TX_L4_MASK) {
+	switch (m->ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_TCP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_SCTP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_UDP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	}
+
+	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
+		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
+		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
+		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
+		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
-/* set TSO context descriptor
- * support IP -> L4 and IP -> IP -> L4
- */
-static inline uint64_t
-iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
+static inline void
+iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t value)
 {
-	uint64_t ctx_desc = 0;
-	uint32_t cd_cmd, hdr_len, cd_tso_len;
-
-	if (!tx_offload.l4_len) {
-		PMD_TX_LOG(DEBUG, "L4 length set to 0");
-		return ctx_desc;
+	*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 	}
 
-	hdr_len = tx_offload.l2_len +
-		  tx_offload.l3_len +
-		  tx_offload.l4_len;
+static inline void
+iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
+	struct rte_mbuf *m, uint64_t desc_template,
+	uint16_t tlen, uint16_t ipseclen)
+{
+	uint32_t hdrlen = m->l2_len;
+	uint32_t bufsz = 0;
+
+	/* fill data descriptor qw1 from template */
+	desc->qw1 = desc_template;
+
+	/* set data buffer address */
+	desc->qw0 = rte_mbuf_data_iova(m);
 
-	cd_cmd = IAVF_TX_CTX_DESC_TSO;
-	cd_tso_len = mbuf->pkt_len - hdr_len;
-	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
-		     ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-		     ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+	/* calculate data buffer size less set header lengths */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			hdrlen += m->outer_l3_len;
 
-	return ctx_desc;
+		if (m->ol_flags & PKT_TX_L4_MASK)
+			hdrlen += m->l3_len + m->l4_len;
+		else
+			hdrlen += m->l3_len;
+
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			hdrlen += ipseclen;
+
+		bufsz = hdrlen + tlen;
+	} else {
+		bufsz = m->data_len;
 }
 
-/* Construct the tx flags */
-static inline uint64_t
-iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
-	       uint32_t td_tag)
+	/* set data buffer size */
+	desc->qw1 |= (((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
+
+	desc->qw0 = rte_cpu_to_le_64(desc->qw0);
+	desc->qw1 = rte_cpu_to_le_64(desc->qw1);
+}
+
+
+static struct iavf_ipsec_crypto_pkt_metadata *
+iavf_ipsec_crypto_get_pkt_metdata(const struct iavf_tx_queue *txq,
+		struct rte_mbuf *m)
 {
-	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
-				((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
-				((uint64_t)td_offset <<
-				 IAVF_TXD_QW1_OFFSET_SHIFT) |
-				((uint64_t)size  <<
-				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
-				((uint64_t)td_tag  <<
-				 IAVF_TXD_QW1_L2TAG1_SHIFT));
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
+				struct iavf_ipsec_crypto_pkt_metadata *);
+
+	return NULL;
 }
 
 /* TX function */
 uint16_t
-iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+iavf_xmit_pkts(void *tx_queue, struct rte_mbuf *mbufs[], uint16_t nb_mbufs)
 {
-	volatile struct iavf_tx_desc *txd;
-	volatile struct iavf_tx_desc *txr;
-	struct iavf_tx_queue *txq;
-	struct iavf_tx_entry *sw_ring;
-	struct iavf_tx_entry *txe, *txn;
-	struct rte_mbuf *tx_pkt;
-	struct rte_mbuf *m_seg;
-	uint16_t tx_id;
-	uint16_t nb_tx;
-	uint32_t td_cmd;
-	uint32_t td_offset;
-	uint32_t td_tag;
-	uint64_t ol_flags;
-	uint16_t nb_used;
-	uint16_t nb_ctx;
-	uint16_t tx_last;
-	uint16_t slen;
-	uint64_t buf_dma_addr;
-	uint16_t cd_l2tag2 = 0;
-	union iavf_tx_offload tx_offload = {0};
-
-	txq = tx_queue;
-	sw_ring = txq->sw_ring;
-	txr = txq->tx_ring;
-	tx_id = txq->tx_tail;
-	txe = &sw_ring[tx_id];
+	struct iavf_tx_queue *txq = tx_queue;
+	volatile struct iavf_tx_desc *desc_ring = txq->tx_ring;
+	struct iavf_tx_entry *txe_ring = txq->sw_ring;
+	struct iavf_tx_entry *txe_current, *txe_next;
+	struct rte_mbuf *mb, *mb_seg;
+	uint16_t desc_idx, desc_idx_last;
+	uint16_t idx;
+
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_free < txq->free_thresh)
-		(void)iavf_xmit_cleanup(txq);
+		iavf_xmit_cleanup(txq);
 
-	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-		td_cmd = 0;
-		td_tag = 0;
-		td_offset = 0;
+	desc_idx = txq->tx_tail;
+	txe_current = &txe_ring[desc_idx];
 
-		tx_pkt = *tx_pkts++;
-		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+		iavf_dump_tx_entry_ring(txq);
+		iavf_dump_tx_desc_ring(txq);
+#endif
+
+
+	for (idx = 0; idx < nb_mbufs; idx++) {
+		volatile struct iavf_tx_desc *ddesc;
+		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
+
+		uint16_t nb_desc_ctx, nb_desc_ipsec;
+		uint16_t nb_desc_data, nb_desc_required;
+		uint16_t tlen = 0, ipseclen = 0;
+		uint64_t ddesc_template = 0;
+		uint64_t ddesc_cmd = 0;
+
+		mb = mbufs[idx];
 
-		ol_flags = tx_pkt->ol_flags;
-		tx_offload.l2_len = tx_pkt->l2_len;
-		tx_offload.l3_len = tx_pkt->l3_len;
-		tx_offload.l4_len = tx_pkt->l4_len;
-		tx_offload.tso_segsz = tx_pkt->tso_segsz;
-		/* Calculate the number of context descriptors needed. */
-		nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
+		RTE_MBUF_PREFETCH_TO_FREE(txe_current->mbuf);
 
-		/* The number of descriptors that must be allocated for
+		/**
+		 * Get metadata for ipsec crypto from mbuf dynamic fields if
+		 * security offload is specified.
+		 */
+		ipsec_md = iavf_ipsec_crypto_get_pkt_metdata(txq, mb);
+
+		nb_desc_data = mb->nb_segs;
+		nb_desc_ctx = !!(mb->ol_flags &
+			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
+		nb_desc_ipsec = !!(mb->ol_flags & PKT_TX_SEC_OFFLOAD);
+
+		/**
+		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
-		 * packet plus 1 context descriptor if needed.
+		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
-		tx_last = (uint16_t)(tx_id + nb_used - 1);
+		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
+
+		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
-		/* Circular ring */
-		if (tx_last >= txq->nb_tx_desc)
-			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+		/* wrap descriptor ring */
+		if (desc_idx_last >= txq->nb_tx_desc)
+			desc_idx_last =
+				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
 
-		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
-			   " tx_first=%u tx_last=%u",
-			   txq->port_id, txq->queue_id, tx_id, tx_last);
+		PMD_TX_LOG(DEBUG,
+			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
+			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
 
-		if (nb_used > txq->nb_free) {
+		if (nb_desc_required > txq->nb_free) {
 			if (iavf_xmit_cleanup(txq)) {
-				if (nb_tx == 0)
+				if (idx == 0)
 					return 0;
 				goto end_of_tx;
 			}
-			if (unlikely(nb_used > txq->rs_thresh)) {
-				while (nb_used > txq->nb_free) {
+			if (unlikely(nb_desc_required > txq->rs_thresh)) {
+				while (nb_desc_required > txq->nb_free) {
 					if (iavf_xmit_cleanup(txq)) {
-						if (nb_tx == 0)
+						if (idx == 0)
 							return 0;
 						goto end_of_tx;
 					}
@@ -2249,122 +2512,114 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 		}
 
-		/* Descriptor based VLAN insertion */
-		if (ol_flags & PKT_TX_VLAN_PKT &&
-		    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
-			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
-			td_tag = tx_pkt->vlan_tci;
-		}
-
-		/* According to datasheet, the bit2 is reserved and must be
-		 * set to 1.
-		 */
-		td_cmd |= 0x04;
+		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
 
-		/* Enable checksum offloading */
-		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
-			iavf_txd_enable_checksum(ol_flags, &td_cmd,
-						&td_offset, tx_offload);
-
-		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
-			uint64_t cd_type_cmd_tso_mss =
-				IAVF_TX_DESC_DTYPE_CONTEXT;
-			volatile struct iavf_tx_context_desc *ctx_txd =
+		if (nb_desc_ctx) {
+			volatile struct iavf_tx_context_desc *ctx_desc =
 				(volatile struct iavf_tx_context_desc *)
-							&txr[tx_id];
+					&desc_ring[desc_idx];
 
 			/* clear QW0 or the previous writeback value
 			 * may impact next write
 			 */
-			*(volatile uint64_t *)ctx_txd = 0;
+			*(volatile uint64_t *)ctx_desc = 0;
+
+			txe_next = &txe_ring[txe_current->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txe_next->mbuf);
 
-			txn = &sw_ring[txe->next_id];
-			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
-			if (txe->mbuf) {
-				rte_pktmbuf_free_seg(txe->mbuf);
-				txe->mbuf = NULL;
+			if (txe_current->mbuf) {
+				rte_pktmbuf_free_seg(txe_current->mbuf);
+				txe_current->mbuf = NULL;
 			}
 
-			/* TSO enabled */
-			if (ol_flags & PKT_TX_TCP_SEG)
-				cd_type_cmd_tso_mss |=
-					iavf_set_tso_ctx(tx_pkt, tx_offload);
+			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
+			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
-			if (ol_flags & PKT_TX_VLAN_PKT &&
-			   txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
-				cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
-					<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-				cd_l2tag2 = tx_pkt->vlan_tci;
+			txe_current->last_id = desc_idx_last;
+			desc_idx = txe_current->next_id;
+			txe_current = txe_next;
 			}
 
-			ctx_txd->type_cmd_tso_mss =
-				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
-			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
+		if (nb_desc_ipsec) {
+			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
+				(volatile struct iavf_tx_ipsec_desc *)
+					&desc_ring[desc_idx];
+
+			txe_next = &txe_ring[txe_current->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txe_next->mbuf);
+
+			if (txe_current->mbuf) {
+				rte_pktmbuf_free_seg(txe_current->mbuf);
+				txe_current->mbuf = NULL;
+			}
+
+			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
 
-			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
+			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
+
+			txe_current->last_id = desc_idx_last;
+			desc_idx = txe_current->next_id;
+			txe_current = txe_next;
 		}
 
-		m_seg = tx_pkt;
+		mb_seg = mb;
+
 		do {
-			txd = &txr[tx_id];
-			txn = &sw_ring[txe->next_id];
-
-			if (txe->mbuf)
-				rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = m_seg;
-
-			/* Setup TX Descriptor */
-			slen = m_seg->data_len;
-			buf_dma_addr = rte_mbuf_data_iova(m_seg);
-			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
-			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
-								  td_offset,
-								  slen,
-								  td_tag);
-
-			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
-			m_seg = m_seg->next;
-		} while (m_seg);
+			ddesc = (volatile struct iavf_tx_desc *)
+					&desc_ring[desc_idx];
+
+			txe_next = &txe_ring[txe_current->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txe_next->mbuf);
+
+			if (txe_current->mbuf)
+				rte_pktmbuf_free_seg(txe_current->mbuf);
+
+			txe_current->mbuf = mb_seg;
+			iavf_fill_data_desc(ddesc, mb_seg,
+					ddesc_template, tlen, ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+			txe_current->last_id = desc_idx_last;
+			desc_idx = txe_current->next_id;
+			txe_current = txe_next;
+			mb_seg = mb_seg->next;
+		} while (mb_seg);
 
 		/* The last packet data descriptor needs End Of Packet (EOP) */
-		td_cmd |= IAVF_TX_DESC_CMD_EOP;
-		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
-		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
+
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
 
 		if (txq->nb_used >= txq->rs_thresh) {
 			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
 				   "%4u (port=%d queue=%d)",
-				   tx_last, txq->port_id, txq->queue_id);
+				   desc_idx_last, txq->port_id, txq->queue_id);
 
-			td_cmd |= IAVF_TX_DESC_CMD_RS;
+			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
 
 			/* Update txq RS bit counters */
 			txq->nb_used = 0;
 		}
 
-		txd->cmd_type_offset_bsz |=
-			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
-					 IAVF_TXD_QW1_CMD_SHIFT);
-		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+		ddesc->qw1 |= rte_cpu_to_le_64(ddesc_cmd <<
+				IAVF_TXD_DATA_QW1_CMD_SHIFT);
+
+		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
 	}
 
 end_of_tx:
 	rte_wmb();
 
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   txq->port_id, txq->queue_id, tx_id, nb_tx);
+		   txq->port_id, txq->queue_id, desc_idx, idx);
 
-	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
-	txq->tx_tail = tx_id;
+	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
+	txq->tx_tail = desc_idx;
 
-	return nb_tx;
+	return idx;
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
@@ -2865,7 +3120,7 @@ iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
 			desc -= txq->nb_tx_desc;
 	}
 
-	status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+	status = &txq->tx_ring[desc].qw1;
 	mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
 	expect = rte_cpu_to_le_64(
 		 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
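
For completeness, the Rx status handling added in this file only sets mbuf
offload flags and per-queue counters; acting on them is left to the
application. A minimal sketch, assuming inline IPsec is in use on the port
(the helper name is illustrative, not part of this patch):

#include <rte_mbuf.h>

/* Illustrative sketch, not part of this patch: react to the ol_flags set by
 * iavf_flex_rxd_to_ipsec_crypto_status(). */
static inline void
app_handle_inline_ipsec_rx(struct rte_mbuf *m)
{
	if (!(m->ol_flags & PKT_RX_SEC_OFFLOAD))
		return;		/* not processed by inline IPsec */

	if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) {
		/* SAD miss, ICV check failure, length error, ... */
		rte_pktmbuf_free(m);
		return;
	}

	/* on success the hardware SA index (SAID) is available in
	 * m->dynfield1[0] when the IPSEC_CRYPTO_SAID extraction is enabled
	 */
}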
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index e210b913d6..9852a89194 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -25,7 +25,8 @@
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		DEV_TX_OFFLOAD_TCP_TSO |		 \
+		DEV_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
 		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
@@ -47,7 +48,7 @@
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          88
+#define IAVF_MIN_TSO_MSS          256
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
@@ -65,7 +66,8 @@
 		PKT_TX_VLAN_PKT |		 \
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
-		PKT_TX_TCP_SEG)
+		PKT_TX_TCP_SEG |		 \
+		PKT_TX_SEC_OFFLOAD)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
@@ -163,6 +165,24 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_rx_queue_stats {
+	uint64_t reserved;
+	struct iavf_ipsec_crypto_stats ipsec_crypto;
+};
+
 /* Structure associated with each Rx queue. */
 struct iavf_rx_queue {
 	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
@@ -211,6 +231,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 				/* handle flexible descriptor by RXDID */
+	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
 };
 
@@ -245,6 +266,7 @@ struct iavf_tx_queue {
 	uint64_t offloads;
 	uint16_t next_dd;              /* next to set RS, for VPMD */
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
+	uint16_t ipsec_crypto_pkt_md_offset;
 
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
@@ -255,6 +277,52 @@ struct iavf_tx_queue {
 	uint8_t tc;
 };
 
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+
+static void iavf_dump_tx_entry(uint16_t txe_id, const struct iavf_tx_entry *txe)
+{
+	printf("txe %3d : next %3d, last %3d, mbuf 0x%p\n",
+		txe_id, txe->next_id, txe->last_id, txe->mbuf);
+}
+
+static void iavf_dump_tx_entry_ring(const struct iavf_tx_queue *txq)
+{
+	uint16_t i;
+
+	printf("port %d, queue %d :\n\n", txq->port_id, txq->queue_id);
+
+	printf("nb descriptors %d\n", txq->nb_tx_desc);
+	printf("tail %d\n", txq->tx_tail);
+	printf("nb used %d, nb free %d\n", txq->nb_used, txq->nb_free);
+	printf("last cleaned %d\n", txq->last_desc_cleaned);
+	printf("free threshold %d\n", txq->free_thresh);
+	printf("rs threshold %d\n\n", txq->rs_thresh);
+
+
+	for (i = 0; i < txq->nb_tx_desc; i++)
+		iavf_dump_tx_entry(i, &txq->sw_ring[i]);
+}
+
+static void iavf_dump_tx_desc_ring(const struct iavf_tx_queue *txq)
+{
+	uint16_t i;
+
+	printf("port %3d, queue %d :\n\n", txq->port_id, txq->queue_id);
+	printf("nb descriptors %d\n", txq->nb_tx_desc);
+
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		volatile struct iavf_tx_data_desc *txd = &txq->tx_ring[i];
+
+		printf("txid %3d - "
+		"QW0: 0x%04"PRIx16" %04"PRIx16" %04"PRIx16" %04"PRIx16", "
+		"QW1: 0x%04"PRIx16" %04"PRIx16" %04"PRIx16" %04"PRIx16"\n",
+	       i, 0, 0, 0, 0, 0, 0, 0,
+	       (const volatile uint16_t)(txd->qw1 & 0xF));
+	}
+}
+
+#endif
+
 /* Offload features */
 union iavf_tx_offload {
 	uint64_t data;
@@ -277,6 +345,8 @@ union iavf_tx_offload {
  * Flex-field 5: AUX1
  */
 struct iavf_32b_rx_flex_desc_comms {
+	union {
+		struct {
 	/* Qword 0 */
 	u8 rxdid;
 	u8 mir_id_umb_cast;
@@ -305,6 +375,101 @@ struct iavf_32b_rx_flex_desc_comms {
 		} flex;
 		__le32 ts_high;
 	} flex_ts;
+		};
+		struct {
+			/* Quad Word 0 */
+
+			u8 rxdid;	/**< Descriptor builder profile ID */
+
+			u8 mirror_id:6;
+			u8 umbcast:2;
+
+			__le16 ptype:10;
+			__le16 flexi_flags_0:6;
+
+			__le16 packet_length:14;
+			__le16 rsv_0:2;
+
+			__le16 hlen:11;
+			__le16 sph:1;
+			__le16 flexi_flags_1:4;
+
+			/* Quad Word 1 */
+			union {
+				__le16 status_error0;
+				struct {
+					__le16 status_error0_dd:1;
+					/* descriptor done */
+					__le16 status_error0_eop:1;
+					/* end of packet */
+					__le16 status_error0_hbo:1;
+					/* header buffer overflow */
+					__le16 status_error0_l3l4p:1;
+					/* l3/l4 integrity check */
+					__le16 status_error0_xsum:4;
+					/* checksum report */
+					__le16 status_error0_lpbk:1;
+					/* loopback */
+					__le16 status_error0_ipv6exadd:1;
+					/* ipv6 w/ dst options or routing hdr */
+					__le16 status_error0_rxe:1;
+					/* rcv mac errors */
+					__le16 status_error0_crcp:1;
+					/* ethernet crc present */
+					__le16 status_error0_rsshash:1;
+					/* rss hash valid */
+					__le16 status_error0_l2tag1p:1;
+					/* l2 tag 1 present */
+					__le16 status_error0_flexi_md0:1;
+					/* flexi md field 0 valid */
+					__le16 status_error0_flexi_md1:1;
+					/* flexi md field 1 valid */
+				};
+			};
+			__le16 l2tag1;
+			__le16 flex_meta0;	/**< flexi metadata field 0 */
+			__le16 flex_meta1;	/**< flexi metadata field 1 */
+
+			/* Quad Word 2 */
+			union {
+				__le16 status_error1;
+				struct {
+					__le16 status_error1_cpm:4;
+					/* Inline IPsec Crypto Status */
+					__le16 status_error1_udp_tunnel:1;
+					/* UDP tunnelled packet NAT-T/UDP-NAT */
+					__le16 status_error1_crypto:1;
+					/* Inline IPsec Crypto Offload */
+					__le16 status_error1_rsv:5;
+					/* Reserved */
+					__le16 status_error1_l2tag2p:1;
+					/* l2 tag 2 present */
+					__le16 status_error1_flexi_md2:1;
+					/* flexi md field 2 valid */
+					__le16 status_error1_flexi_md3:1;
+					/* flexi md field 3 valid */
+					__le16 status_error1_flexi_md4:1;
+					/* flexi md field 4 valid */
+					__le16 status_error1_flexi_md5:1;
+					/* flexi md field 5 valid */
+				};
+			};
+
+			u8 flex_flags2;
+			u8 time_stamp_low;
+
+			__le16 l2tag2_1st;			/**< L2TAG */
+			__le16 l2tag2_2nd;			/**< L2TAG */
+
+			/* Quad Word 3 */
+
+			__le16 flex_meta2;	/**< flexi metadata field 2 */
+			__le16 flex_meta3;	/**< flexi metadata field 3 */
+			__le16 flex_meta4;	/**< flexi metadata field 4 */
+			__le16 flex_meta5;	/**< flexi metadata field 5 */
+
+		} debug;
+	};
 };
 
 /* Rx Flex Descriptor
@@ -347,6 +512,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
 	} flex_ts;
 };
 
+/* Rx Flex Descriptor
+ * RxDID Profile ID 24 Inline IPsec
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: Inline IPsec SAID lower 16-bits
+ * Flex-field 5: Inline IPsec SAID upper 16-bits
+ */
+struct iavf_32b_rx_flex_desc_comms_ipsec {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	__le32 ipsec_said;
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -366,6 +565,7 @@ enum iavf_rxdid {
 	IAVF_RXDID_COMMS_AUX_TCP	= 21,
 	IAVF_RXDID_COMMS_OVS_1		= 22,
 	IAVF_RXDID_COMMS_OVS_2		= 23,
+	IAVF_RXDID_COMMS_IPSEC_CRYPTO	= 24,
 	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
 	IAVF_RXDID_LAST			= 63,
 };
@@ -393,9 +593,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
 
 enum iavf_rx_flex_desc_status_error_1_bits {
 	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
 	/* [10:6] reserved */
 	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
 	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
@@ -405,6 +609,24 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
+
+enum iavf_rx_flex_desc_ipsec_crypto_status {
+	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
+	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
+	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
+	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
+	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
+	/* Reserved */
+	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
+};
+
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)
+
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
@@ -510,8 +732,8 @@ uint16_t iavf_recv_scattered_pkts_vec_avx512_flex_rxd(void *rx_queue,
 						      struct rte_mbuf **rx_pkts,
 						      uint16_t nb_pkts);
 uint16_t iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
-							      struct rte_mbuf **rx_pkts,
-							      uint16_t nb_pkts);
+						struct rte_mbuf **rx_pkts,
+						uint16_t nb_pkts);
 uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 				   uint16_t nb_pkts);
 uint16_t iavf_xmit_pkts_vec_avx512_offload(void *tx_queue,
@@ -523,6 +745,100 @@ uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
 
 const uint32_t *iavf_get_default_ptype_table(void);
 
+static void iavf_dump_rx_flex_desc(const volatile
+		struct iavf_32b_rx_flex_desc_comms *desc)
+{
+	printf("QW0: rxdid          : (0x%x) %d\n", desc->debug.rxdid,
+			desc->debug.rxdid);
+	printf("QW0: mirror id      : %d\n", desc->debug.mirror_id);
+	printf("QW0: umbcast id     : %d\n", desc->debug.umbcast);
+	printf("QW0: ptype          : (0x%x) %d\n", desc->debug.ptype,
+			desc->debug.ptype);
+	printf("QW0: flexi flags 0  : %x\n", desc->debug.flexi_flags_0);
+	printf("QW0: packet len     : %d\n", desc->debug.packet_length);
+	printf("QW0: header len     : %d\n", desc->debug.hlen);
+	printf("QW0: sph len        : %d\n", desc->debug.sph);
+	printf("QW0: flexi flags 1  : %x\n", desc->debug.flexi_flags_1);
+
+
+	printf("QW1: status/error 0 : 0x%x\n", desc->debug.status_error0);
+
+	printf("QW1: status/error 0 - dd         : 0x%x\n",
+			desc->debug.status_error0_dd);
+	printf("QW1: status/error 0 - eop        : 0x%x\n",
+			desc->debug.status_error0_eop);
+	printf("QW1: status/error 0 - hbo        : 0x%x\n",
+			desc->debug.status_error0_hbo);
+	printf("QW1: status/error 0 - l3l4p      : 0x%x\n",
+			desc->debug.status_error0_l3l4p);
+	printf("QW1: status/error 0 - xsum       : 0x%x\n",
+			desc->debug.status_error0_xsum);
+	printf("QW1: status/error 0 - lpbk       : 0x%x\n",
+			desc->debug.status_error0_lpbk);
+	printf("QW1: status/error 0 - ipv6extadd : 0x%x\n",
+			desc->debug.status_error0_ipv6exadd);
+	printf("QW1: status/error 0 - rxe        : 0x%x\n",
+			desc->debug.status_error0_rxe);
+	printf("QW1: status/error 0 - crcp       : 0x%x\n",
+			desc->debug.status_error0_crcp);
+	printf("QW1: status/error 0 - rsshash    : 0x%x\n",
+			desc->debug.status_error0_rsshash);
+	printf("QW1: status/error 0 - l2tag 1 p  : 0x%x\n",
+			desc->debug.status_error0_l2tag1p);
+	printf("QW1: status/error 0 - flexi md 0 : 0x%x\n",
+			desc->debug.status_error0_flexi_md0);
+	printf("QW1: status/error 0 - flexi md 1 : 0x%x\n",
+			desc->debug.status_error0_flexi_md1);
+
+	printf("QW1: l2tag1     : %d\n",
+		desc->debug.status_error0_l2tag1p ? desc->debug.l2tag1 : 0);
+	printf("QW1: flexi md 0 : 0x%x\n",
+		desc->debug.status_error0_flexi_md0 ?
+				desc->debug.flex_meta0 : 0);
+	printf("QW1: flexi md 1 : 0x%x\n",
+			desc->debug.status_error0_flexi_md1 ?
+					desc->debug.flex_meta1 : 0);
+
+
+	printf("QW2: status/error 1 : 0x%x\n", desc->debug.status_error1);
+
+	printf("QW2: status/error 1 - cpm status : 0x%x\n",
+			desc->debug.status_error1_cpm);
+	printf("QW2: status/error 1 - udp tunnel : 0x%x\n",
+			desc->debug.status_error1_udp_tunnel);
+	printf("QW2: status/error 1 - crypto     : 0x%x\n",
+			desc->debug.status_error1_crypto);
+	printf("QW2: status/error 1 - l2tag 2 p  : 0x%x\n",
+			desc->debug.status_error1_l2tag2p);
+	printf("QW2: status/error 1 - flexi md 2 : 0x%x\n",
+			desc->debug.status_error1_flexi_md2);
+	printf("QW2: status/error 1 - flexi md 3 : 0x%x\n",
+			desc->debug.status_error1_flexi_md3);
+	printf("QW2: status/error 1 - flexi md 4 : 0x%x\n",
+			desc->debug.status_error1_flexi_md4);
+	printf("QW2: status/error 1 - flexi md 5 : 0x%x\n",
+			desc->debug.status_error1_flexi_md5);
+
+
+	printf("QW2: flexi flags 2  : 0x%x\n", desc->debug.flex_flags2);
+	printf("QW2: timestamp low  : 0x%x\n", desc->debug.time_stamp_low);
+	printf("QW2: l2tag2_1       : 0x%x\n", desc->debug.l2tag2_1st);
+	printf("QW2: l2tag2_2       : 0x%x\n", desc->debug.l2tag2_2nd);
+
+	printf("QW3: flexi md 2     : 0x%x\n",
+			desc->debug.status_error1_flexi_md2 ?
+					desc->debug.flex_meta2 : 0);
+	printf("QW3: flexi md 3     : 0x%x\n",
+			desc->debug.status_error1_flexi_md3 ?
+					desc->debug.flex_meta3 : 0);
+	printf("QW3: flexi md 4     : 0x%x\n",
+			desc->debug.status_error1_flexi_md4 ?
+					desc->debug.flex_meta4 : 0);
+	printf("QW3: flexi md 5     : 0x%x\n",
+			desc->debug.status_error1_flexi_md5 ?
+					desc->debug.flex_meta5 : 0);
+}
+
 static inline
 void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
 			    const volatile void *desc,
@@ -541,9 +857,235 @@ void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
 	       " QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", rxq->queue_id,
 	       rx_id, rx_desc->read.pkt_addr, rx_desc->read.hdr_addr,
 	       rx_desc->read.rsvd1, rx_desc->read.rsvd2);
+
+	iavf_dump_rx_flex_desc(desc);
 #endif
 }
 
+static uint8_t cipherblock_sz(uint8_t blksz)
+{
+	switch (blksz) {
+	case 2:
+		return 8;
+	case 3:
+		return 16;
+	}
+
+	return 0;
+}
+
+static void iavf_dump_tx_ctx_desc(const volatile
+		struct iavf_tx_context_desc *desc)
+{
+	struct iavf_tx_context_desc ctx;
+
+	ctx.qw0 = rte_le_to_cpu_64(desc->qw0);
+	ctx.qw1 = rte_le_to_cpu_64(desc->qw1);
+
+	const char *eipt, *l4tunt;
+
+	const char *eipt_no_exip = "no_exip";
+	const char *eipt_ip6 = "ip6";
+	const char *eipt_ip4_no_checksum = "ip4_no_checksum";
+	const char *eipt_ip4_w_checksum = "ip4_w_checksum";
+
+	const char *l4tunt_no_udp_gre = "no_udp_gre";
+	const char *l4tunt_udp = "udp";
+	const char *l4tunt_gre = "gre";
+
+	switch (ctx.debug.tunneling & 0x3) {
+	case 1:
+		eipt = eipt_ip6;
+		break;
+	case 2:
+		eipt = eipt_ip4_no_checksum;
+		break;
+	case 3:
+		eipt = eipt_ip4_w_checksum;
+		break;
+	default:
+		eipt = eipt_no_exip;
+	}
+
+	switch ((ctx.debug.tunneling & 0x600) >> 9) {
+	case 0:
+		l4tunt = l4tunt_no_udp_gre;
+		break;
+	case 1:
+		l4tunt = l4tunt_udp;
+		break;
+	case 2:
+		l4tunt = l4tunt_gre;
+		break;
+	default:
+		l4tunt = "invalid value set for l4 tunnel type ";
+	}
+
+	printf("QW0: Tunnel EIPT : (%d) %s\n", ctx.debug.tunneling & 0x3, eipt);
+	printf("QW0: Tunnel EIPLEN : %d\n",
+			(uint32_t)(((ctx.debug.tunneling >>
+				IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT) &
+				IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK) << 2));
+	printf("QW0: Tunnel EIP_NOINC : %d\n",
+			(ctx.debug.tunneling >> 11) & 0x1);
+
+	printf("QW0: Tunnel L4TUNT : (%d) %s\n",
+			(ctx.debug.tunneling & 0x600) >> 9, l4tunt);
+	printf("QW0: Tunnel L4TUNLEN : (%d)\n",
+			(ctx.debug.tunneling >> 12) & 0x7F);
+
+	printf("QW0: Tunnel DEC Inner TTL : %d\n", 0);
+	printf("QW0: Tunnel UDP Checksum : %d\n", 0);
+
+	printf("QW0: L2TAG1 : %d\n", ctx.l2tag2);
+
+	printf("QW1: DTYP: %d\n", ctx.debug.type);
+
+	printf("QW1: Cmd TSO          : %x\n", (ctx.debug.cmd >> 0) & 0x1);
+	printf("QW1: Cmd TSYN         : %x\n", (ctx.debug.cmd >> 1) & 0x1);
+	printf("QW1: Cmd IL2TAG2      : %x\n", (ctx.debug.cmd >> 2) & 0x1);
+	printf("QW1: Cmd IL2TAG2_IL2H : %x\n", (ctx.debug.cmd >> 3) & 0x1);
+	printf("QW1: Cmd SWITCH       : %x\n", (ctx.debug.cmd >> 4) & 0x3);
+
+	printf("QW1: IPsec Cipher Block Sz: %d\n",
+			cipherblock_sz(ctx.debug.ipsec & 0x7));
+	printf("QW1: IPsec ICV Sz         : %d\n", (ctx.debug.ipsec >> 3) << 2);
+
+	printf("QW1: TLength: %d\n", ctx.debug.tlen_tsyn);
+	printf("QW1: MSS: %d\n", ctx.debug.mss_target_vsi);
+}
+
+#include <netinet/in.h>
+
+static const char *ipproto_to_str(uint8_t ipproto)
+{
+	switch (ipproto) {
+	case IPPROTO_IP:
+		return "Dummy";
+	case IPPROTO_IPIP:
+		return "IPIP";
+	case IPPROTO_TCP:
+		return "TCP";
+	case IPPROTO_UDP:
+		return "UDP";
+	case IPPROTO_ESP:
+		return "ESP";
+	case IPPROTO_AH:
+		return "AH";
+	case IPPROTO_IPV6:
+		return "IPV6";
+	case IPPROTO_SCTP:
+		return "SCTP";
+	case IPPROTO_RAW:
+		return "RAW";
+	}
+
+	return "Unknown";
+}
+
+static void iavf_dump_tx_ipsec_desc(const volatile
+		struct iavf_tx_ipsec_desc *desc)
+{
+	struct iavf_tx_ipsec_desc ipsec;
+	uint16_t ivlen = 0;
+
+	ipsec.qw0 = rte_le_to_cpu_64(desc->qw0);
+	ipsec.qw1 = rte_le_to_cpu_64(desc->qw1);
+
+	switch (ipsec.ivlen) {
+	case 1:
+		ivlen = 4;
+		break;
+	case 2:
+		ivlen = 8;
+		break;
+	case 3:
+		ivlen = 16;
+		break;
+	}
+
+	printf("QW0: L4 Payload Length: %d\n", ipsec.l4payload_length);
+	printf("QW0: ESN : %d\n", ipsec.esn);
+	printf("QW0: ESP Trailer Length: %d\n", ipsec.trailer_length);
+
+	printf("QW1: DTYP: %d\n", ipsec.type);
+	printf("QW1: UDP: %s\n", ipsec.udp ? "yes" : "no");
+	printf("QW1: IV Length: %d\n", ivlen);
+	printf("QW1: Next Proto: (%d) %s\n", ipsec.next_header,
+			ipproto_to_str(ipsec.next_header));
+	printf("QW1: IPv6 Extension Headers Length: %d\n",
+			ipsec.ipv6_ext_hdr_length);
+	printf("QW1: SAID: %d\n", ipsec.said);
+}
+
+static const char *iipt_to_str(uint8_t iipt)
+{
+	switch (iipt) {
+	case 0:
+		return "Non IP packet / not defined";
+	case 1:
+		return "IPv6";
+	case 2:
+		return "IPv4 w/ no IP Checksum";
+	case 3:
+		return "IPv4 w/ IP Checksum";
+	}
+
+	return "";
+}
+
+static const char *l4t_to_str(uint8_t l4t)
+{
+	switch (l4t) {
+	case 0:
+		return "unknown / fragment";
+	case 1:
+		return "TCP";
+	case 2:
+		return "SCTP";
+	case 3:
+		return "UDP";
+	}
+
+	return "";
+}
+
+static void iavf_dump_tx_data_desc(const volatile struct iavf_tx_desc *desc)
+{
+	struct iavf_tx_desc data;
+
+
+	data.qw0 = rte_le_to_cpu_64(desc->qw0);
+	data.qw1 = rte_le_to_cpu_64(desc->qw1);
+
+	printf("QW0: Buffer Address : 0x%016"PRIx64"\n",
+			data.debug.buffer_addr);
+
+	printf("QW1: Dtype : %d\n", data.debug.type);
+
+	printf("QW1: Cmd : %x\n", data.debug.cmd);
+	printf("QW1: Cmd EOP     : %x\n", (data.debug.cmd >> 0) & 0x1);
+	printf("QW1: Cmd RS      : %x\n", (data.debug.cmd >> 1) & 0x1);
+	printf("QW1: Cmd RSV     : %x\n", (data.debug.cmd >> 2) & 0x1);
+	printf("QW1: Cmd IL2TAG1 : %x\n", (data.debug.cmd >> 3) & 0x1);
+	printf("QW1: Cmd DUMMY   : %x\n", (data.debug.cmd >> 4) & 0x1);
+	printf("QW1: Cmd IIPT    : (%x) %s\n", (data.debug.cmd >> 5) & 0x3,
+			iipt_to_str((data.debug.cmd >> 5) & 0x3));
+	printf("QW1: Cmd RSV     : %x\n", (data.debug.cmd >> 7) & 0x1);
+	printf("QW1: Cmd L4T     : (%x) %s\n", (data.debug.cmd >> 8) & 0x3,
+			l4t_to_str((data.debug.cmd >> 8) & 0x3));
+	printf("QW1: Cmd RE      : %x\n", (data.debug.cmd >> 10) & 0x1);
+	printf("QW1: Cmd RSV     : %x\n", (data.debug.cmd >> 11) & 0x1);
+
+	printf("QW1: Offset L2  : %d\n", data.debug.offset_l2len << 1);
+	printf("QW1: Offset L3  : %d\n", data.debug.offset_l3len << 2);
+	printf("QW1: Offset L4  : %d\n", data.debug.offset_l4len << 2);
+
+	printf("QW1: Tx Buf Sz  : %d\n", data.debug.buffer_sz);
+
+	printf("QW1: l2tag1 : %d\n", data.debug.l2tag1);
+}
+
 /* All the descriptors are 16 bytes, so just use one of them
  * to print the qwords
  */
@@ -555,24 +1097,29 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	const volatile struct iavf_tx_desc *tx_desc = desc;
 	enum iavf_tx_desc_dtype_value type;
 
-	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
-		tx_desc->cmd_type_offset_bsz &
-		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+
+	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(tx_desc->qw1 &
+			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
 	switch (type) {
 	case IAVF_TX_DESC_DTYPE_DATA:
-		name = "Tx_data_desc";
+		name = "Data Tx Desc: ";
+		iavf_dump_tx_data_desc(desc);
 		break;
 	case IAVF_TX_DESC_DTYPE_CONTEXT:
-		name = "Tx_context_desc";
+		name = "Context Tx Desc: ";
+		iavf_dump_tx_ctx_desc(desc);
+		break;
+	case IAVF_TX_DESC_DTYPE_IPSEC:
+		name = "IPsec Tx Desc: ";
+		iavf_dump_tx_ipsec_desc(desc);
 		break;
 	default:
-		name = "unknown_desc";
+		name = "Unknown Tx Desc: ";
 		break;
 	}
 
 	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
-	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
-	       tx_desc->cmd_type_offset_bsz);
+		txq->queue_id, name, tx_id, tx_desc->qw0, tx_desc->qw1);
 }
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index ee1e905525..288c5ca1f1 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -363,10 +363,12 @@ static inline void
 flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
 		     const uint32_t *type_table)
 {
-	const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+	const __m128i ptype_mask = _mm_set_epi16(
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
+
 	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
 	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
 	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 7f86050df3..d99b03c8b2 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -23,8 +23,8 @@
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
-#define MAX_TRY_TIMES 200
-#define ASQ_DELAY_MS  10
+#define MAX_TRY_TIMES 2000
+#define ASQ_DELAY_MS  1
 
 static uint32_t
 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
@@ -143,7 +143,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 }
 
 static int
-iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args,
+	int async)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
@@ -155,8 +156,14 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (vf->vf_reset)
 		return -EIO;
 
-	if (_atomic_set_cmd(vf, args->ops))
-		return -1;
+
+	if (async) {
+		if (_atomic_set_async_response_cmd(vf, args->ops))
+			return -1;
+	} else {
+		if (_atomic_set_cmd(vf, args->ops))
+			return -1;
+	}
 
 	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
@@ -252,9 +259,11 @@ static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
@@ -330,18 +339,40 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
 		case iavf_aqc_opc_send_msg_to_vf:
 			if (msg_opc == VIRTCHNL_OP_EVENT) {
 				iavf_handle_pf_event_msg(dev, info.msg_buf,
-							info.msg_len);
+						info.msg_len);
 			} else {
+				/* check for inline IPsec events */
+				struct inline_ipsec_msg *imsg =
+					(struct inline_ipsec_msg *)info.msg_buf;
+				struct rte_eth_event_ipsec_desc desc;
+				if (msg_opc == VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
+					&& imsg->ipsec_opcode ==
+						INLINE_IPSEC_OP_EVENT) {
+					struct virtchnl_ipsec_event *ev =
+							imsg->ipsec_data.event;
+					desc.subtype =
+						RTE_ETH_EVENT_IPSEC_UNKNOWN;
+					desc.metadata = ev->ipsec_event_data;
+					rte_eth_dev_callback_process(dev,
+							RTE_ETH_EVENT_IPSEC,
+							&desc);
+					return;
+				}
+
 				/* read message and it's expected one */
-				if (msg_opc == vf->pend_cmd)
-					_notify_cmd(vf, msg_ret);
-				else
-					PMD_DRV_LOG(ERR, "command mismatch,"
-						    "expect %u, get %u",
-						    vf->pend_cmd, msg_opc);
+				if (msg_opc == vf->pend_cmd) {
+					rte_atomic32_dec(&vf->pend_cmd_count);
+					if (rte_atomic32_read(
+						&vf->pend_cmd_count) == 0)
+						_notify_cmd(vf, msg_ret);
+				} else {
+					PMD_DRV_LOG(ERR,
+					"command mismatch, expect %u, get %u",
+						vf->pend_cmd, msg_opc);
+				}
 				PMD_DRV_LOG(DEBUG,
-					    "adminq response is received,"
-					    " opcode = %d", msg_opc);
+				"adminq response is received, opcode = %d",
+						msg_opc);
 			}
 			break;
 		default:
@@ -365,7 +396,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_ENABLE_VLAN_STRIPPING");
@@ -386,7 +417,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_DISABLE_VLAN_STRIPPING");
@@ -415,7 +446,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
 		return err;
@@ -468,12 +499,13 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
-		VIRTCHNL_VF_OFFLOAD_QOS;
+		VIRTCHNL_VF_OFFLOAD_QOS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -518,7 +550,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
@@ -562,7 +594,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_strip);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
@@ -602,7 +634,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_insert);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
@@ -645,7 +677,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(vlan_filter);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");
@@ -666,7 +698,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
@@ -697,7 +729,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES");
@@ -725,7 +757,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES");
@@ -758,7 +790,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
@@ -800,7 +832,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
@@ -844,7 +876,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
@@ -890,7 +922,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
@@ -922,7 +954,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
@@ -954,7 +986,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
@@ -1046,7 +1078,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
@@ -1087,7 +1119,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
 
@@ -1128,7 +1160,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
 
@@ -1188,7 +1220,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		args.in_args_size = len;
 		args.out_buffer = vf->aq_resp;
 		args.out_size = IAVF_AQ_BUF_SZ;
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		if (err)
 			PMD_DRV_LOG(ERR, "fail to execute command %s",
 				    add ? "OP_ADD_ETHER_ADDRESS" :
@@ -1215,7 +1247,7 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		*pstats = NULL;
@@ -1250,7 +1282,7 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1290,7 +1322,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
@@ -1317,7 +1349,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
@@ -1344,7 +1376,7 @@ iavf_fdir_add(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
 		return err;
@@ -1404,7 +1436,7 @@ iavf_fdir_del(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
 		return err;
@@ -1451,7 +1483,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
 		return err;
@@ -1492,7 +1524,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of %s",
@@ -1515,7 +1547,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
@@ -1541,7 +1573,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_SET_RSS_HENA");
@@ -1562,7 +1594,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1595,7 +1627,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_TC_MAP");
@@ -1640,7 +1672,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 		i * sizeof(struct virtchnl_ether_addr);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
@@ -1685,7 +1717,7 @@ iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
 	 * before iavf_read_msg_from_pf.
 	 */
 	rte_intr_disable(&pci_dev->intr_handle);
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	rte_intr_enable(&pci_dev->intr_handle);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
@@ -1721,7 +1753,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
 		return err;
@@ -1734,3 +1766,33 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 
 	return 0;
 }
+
+
+
+int
+iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
+	args.in_args = msg;
+	args.in_args_size = msg_len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 1);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+				"OP_INLINE_IPSEC_CRYPTO");
+		return err;
+	}
+
+	memcpy(resp_msg, args.out_buffer, resp_msg_len);
+
+	return 0;
+}
+
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index f2010a8337..385770b043 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -10,7 +10,7 @@ endif
 cflags += ['-Wno-strict-aliasing']
 
 includes += include_directories('../../common/iavf')
-deps += ['common_iavf']
+deps += ['common_iavf', 'security', 'cryptodev']
 
 sources = files(
         'iavf_ethdev.c',
@@ -20,6 +20,7 @@ sources = files(
         'iavf_fdir.c',
         'iavf_hash.c',
         'iavf_tm.c',
+        'iavf_ipsec_crypto.c',
 )
 
 if arch_subdir == 'x86'
diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
index 3a045040f1..7426eb9be3 100644
--- a/drivers/net/iavf/rte_pmd_iavf.h
+++ b/drivers/net/iavf/rte_pmd_iavf.h
@@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 /**
  * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map
index f3efe756cf..97f0f87311 100644
--- a/drivers/net/iavf/version.map
+++ b/drivers/net/iavf/version.map
@@ -13,4 +13,7 @@ EXPERIMENTAL {
 	rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+	# added in 21.11
+	rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v2 3/4] net/iavf: Add xstats support for inline IPsec crypto
  2021-09-15 13:32 ` [dpdk-dev] [PATCH v2 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-09-15 13:32   ` [dpdk-dev] [PATCH v2 1/4] common/iavf: " Radu Nicolau
  2021-09-15 13:32   ` [dpdk-dev] [PATCH v2 2/4] net/iavf: " Radu Nicolau
@ 2021-09-15 13:32   ` Radu Nicolau
  2021-09-15 13:32   ` [dpdk-dev] [PATCH v2 4/4] net/iavf: add watchdog for VFLR Radu Nicolau
  3 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-15 13:32 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Add per-queue counters for maintaining statistics for inline IPsec
crypto offload. These can be retrieved through
rte_security_session_stats_get(), with more detailed error counters
exposed through the rte_ethdev xstats API.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h        | 21 ++++++++-
 drivers/net/iavf/iavf_ethdev.c | 84 ++++++++++++++++++++++++++++------
 drivers/net/iavf/iavf_rxtx.h   | 12 -----
 3 files changed, 89 insertions(+), 28 deletions(-)
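
The commit message above mentions retrieval through the rte_ethdev xstats
API; for context, a minimal sketch of how an application could dump the new
counters (the helper name and the filter on the "inline_ipsec_crypto_"
prefix are illustrative assumptions, not part of the patch):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_ethdev.h>

/* Print only the inline IPsec crypto counters added by this patch. */
static void
dump_inline_ipsec_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *xstats = NULL;
	int nb, i;

	nb = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (nb <= 0)
		return;

	names = calloc(nb, sizeof(*names));
	xstats = calloc(nb, sizeof(*xstats));
	if (names == NULL || xstats == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, nb) != nb ||
			rte_eth_xstats_get(port_id, xstats, nb) != nb)
		goto out;

	for (i = 0; i < nb; i++)
		if (strncmp(names[i].name, "inline_ipsec_crypto_", 20) == 0)
			printf("%s: %" PRIu64 "\n",
				names[i].name, xstats[i].value);
out:
	free(names);
	free(xstats);
}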

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 934ef48278..d5f574b4b3 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -92,6 +92,25 @@ struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
 
+
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_eth_xstats {
+	struct virtchnl_eth_stats eth_stats;
+	struct iavf_ipsec_crypto_stats ips_stats;
+};
+
 /* Structure that defines a VSI, associated with a adapter. */
 struct iavf_vsi {
 	struct iavf_adapter *adapter; /* Backreference to associated adapter */
@@ -101,7 +120,7 @@ struct iavf_vsi {
 	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
 	uint16_t base_vector;
 	uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
-	struct virtchnl_eth_stats eth_stats_offset;
+	struct iavf_eth_xstats eth_stats_offset;
 };
 
 struct rte_flow;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 8a562e0942..b8b8d2e394 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -89,6 +89,7 @@ static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);
 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n);
 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -144,21 +145,37 @@ struct rte_iavf_xstats_name_off {
 	unsigned int offset;
 };
 
+#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
-	{"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
-	{"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
-	{"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
-	{"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
-	{"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
+	{"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
+	{"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
+	{"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
+	{"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
+	{"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
 	{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
 		rx_unknown_protocol)},
-	{"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
-	{"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
-	{"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
-	{"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
-	{"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
-	{"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
+	{"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
+	{"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
+	{"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
+	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
+	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
+	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+
+	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
+	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
+	{"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
+	{"inline_ipsec_crypto_ierrors_sad_lookup",
+			_OFF_OF(ips_stats.ierrors.sad_miss)},
+	{"inline_ipsec_crypto_ierrors_not_processed",
+			_OFF_OF(ips_stats.ierrors.not_processed)},
+	{"inline_ipsec_crypto_ierrors_icv_fail",
+			_OFF_OF(ips_stats.ierrors.icv_check)},
+	{"inline_ipsec_crypto_ierrors_length",
+			_OFF_OF(ips_stats.ierrors.ipsec_length)},
+	{"inline_ipsec_crypto_ierrors_misc",
+			_OFF_OF(ips_stats.ierrors.misc)},
 };
+#undef _OFF_OF
 
 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
 		sizeof(rte_iavf_stats_strings[0]))
@@ -176,7 +193,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.stats_reset                = iavf_dev_stats_reset,
 	.xstats_get                 = iavf_dev_xstats_get,
 	.xstats_get_names           = iavf_dev_xstats_get_names,
-	.xstats_reset               = iavf_dev_stats_reset,
+	.xstats_reset               = iavf_dev_xstats_reset,
 	.promiscuous_enable         = iavf_dev_promiscuous_enable,
 	.promiscuous_disable        = iavf_dev_promiscuous_disable,
 	.allmulticast_enable        = iavf_dev_allmulticast_enable,
@@ -1543,7 +1560,7 @@ iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
 static void
 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
 {
-	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
 
 	iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
 	iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
@@ -1605,7 +1622,18 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev)
 		return ret;
 
 	/* set stats offset base on current values */
-	vsi->eth_stats_offset = *pstats;
+	vsi->eth_stats_offset.eth_stats = *pstats;
+
+	return 0;
+}
+
+static int
+iavf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_dev_stats_reset(dev);
+	memset(&vf->vsi.eth_stats_offset, 0, sizeof(struct iavf_eth_xstats));
 
 	return 0;
 }
@@ -1625,6 +1653,27 @@ static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return IAVF_NB_XSTATS;
 }
 
+static void
+iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
+		struct iavf_ipsec_crypto_stats *ips)
+{
+	uint16_t idx;
+	for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
+		struct iavf_rx_queue *rxq;
+		struct iavf_ipsec_crypto_stats *stats;
+		rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
+		stats = &rxq->stats.ipsec_crypto;
+		ips->icount += stats->icount;
+		ips->ibytes += stats->ibytes;
+		ips->ierrors.count += stats->ierrors.count;
+		ips->ierrors.sad_miss += stats->ierrors.sad_miss;
+		ips->ierrors.not_processed += stats->ierrors.not_processed;
+		ips->ierrors.icv_check += stats->ierrors.icv_check;
+		ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
+		ips->ierrors.misc += stats->ierrors.misc;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1635,6 +1684,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_vsi *vsi = &vf->vsi;
 	struct virtchnl_eth_stats *pstats = NULL;
+	struct iavf_eth_xstats iavf_xtats = {0};
 
 	if (n < IAVF_NB_XSTATS)
 		return IAVF_NB_XSTATS;
@@ -1647,11 +1697,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 		return 0;
 
 	iavf_update_stats(vsi, pstats);
+	iavf_xtats.eth_stats = *pstats;
+
+	if (iavf_ipsec_crypto_supported(adapter))
+		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
-		xstats[i].value = *(uint64_t *)(((char *)pstats) +
+		xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
 			rte_iavf_stats_strings[i].offset);
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 9852a89194..5bdd43bcc0 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -165,18 +165,6 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
-struct iavf_ipsec_crypto_stats {
-	uint64_t icount;
-	uint64_t ibytes;
-	struct {
-		uint64_t count;
-		uint64_t sad_miss;
-		uint64_t not_processed;
-		uint64_t icv_check;
-		uint64_t ipsec_length;
-		uint64_t misc;
-	} ierrors;
-};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v2 4/4] net/iavf: add watchdog for VFLR
  2021-09-15 13:32 ` [dpdk-dev] [PATCH v2 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (2 preceding siblings ...)
  2021-09-15 13:32   ` [dpdk-dev] [PATCH v2 3/4] net/iavf: Add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-09-15 13:32   ` Radu Nicolau
  3 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-15 13:32 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Add a watchdog to the iAVF PMD which supports monitoring the VFLR
register. If the device is not already in reset and a VF reset in
progress is detected, notify the user through a callback and enter the
reset state. If the device is already in reset, poll for completion of
the reset.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h        |  6 +++
 drivers/net/iavf/iavf_ethdev.c | 97 ++++++++++++++++++++++++++++++++++
 2 files changed, 103 insertions(+)
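
The watchdog reports the VFLR event to the application as
RTE_ETH_EVENT_INTR_RESET; for context, a minimal sketch of how an
application could consume that event (the callback body and helper name
are illustrative assumptions, not part of the patch):

#include <stdio.h>

#include <rte_common.h>
#include <rte_ethdev.h>

/* Invoked by the ethdev layer when the watchdog detects a VF reset. */
static int
vf_reset_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	printf("port %u: VF reset detected, trigger device recovery\n",
			port_id);
	return 0;
}

/* Typically registered once, before starting the port. */
static void
register_vf_reset_cb(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
			vf_reset_event_cb, NULL);
}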

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index d5f574b4b3..4481d2e134 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -212,6 +212,12 @@ struct iavf_info {
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
+	struct {
+		uint8_t enabled:1;
+		uint64_t period_us;
+	} watchdog;
+	/** iAVF watchdog configuration */
+
 	/* Event from pf */
 	bool dev_closed;
 	bool link_up;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index b8b8d2e394..1c9b58293e 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -24,6 +24,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_alarm.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
@@ -239,6 +240,94 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+
+static int
+iavf_vfr_inprogress(struct iavf_hw *hw)
+{
+	int inprogress = 0;
+
+	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
+		VIRTCHNL_VFR_INPROGRESS)
+		inprogress = 1;
+
+	if (inprogress)
+		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
+
+	return inprogress;
+}
+
+static void
+iavf_dev_watchdog(void *cb_arg)
+{
+	struct iavf_adapter *adapter = cb_arg;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	int vfr_inprogress = 0, rc = 0;
+
+	/* check if watchdog has been disabled since last call */
+	if (!adapter->vf.watchdog.enabled)
+		return;
+
+	/* If in reset then poll vfr_inprogress register for completion */
+	if (adapter->vf.vf_reset) {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (!vfr_inprogress) {
+			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+				adapter->eth_dev->data->name);
+			adapter->vf.vf_reset = false;
+		}
+	/* If not in reset then poll vfr_inprogress register for VFLR event */
+	} else {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (vfr_inprogress) {
+			PMD_DRV_LOG(INFO,
+				"VF \"%s\" reset event has been detected by watchdog",
+				adapter->eth_dev->data->name);
+
+			/* enter reset state with VFLR event */
+			adapter->vf.vf_reset = true;
+
+			rte_eth_dev_callback_process(adapter->eth_dev,
+				RTE_ETH_EVENT_INTR_RESET, NULL);
+		}
+	}
+
+	/* re-alarm watchdog */
+	rc = rte_eal_alarm_set(adapter->vf.watchdog.period_us,
+			&iavf_dev_watchdog, cb_arg);
+
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed \"%s\" to reset device watchdog alarm",
+			adapter->eth_dev->data->name);
+}
+
+static void
+iavf_dev_watchdog_enable(struct iavf_adapter *adapter, uint64_t period_us)
+{
+	int rc;
+
+	PMD_DRV_LOG(INFO, "Enabling device watchdog");
+
+	adapter->vf.watchdog.enabled = 1;
+	adapter->vf.watchdog.period_us = period_us;
+
+	rc = rte_eal_alarm_set(adapter->vf.watchdog.period_us,
+			&iavf_dev_watchdog, (void *)adapter);
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed to enabled device watchdog");
+}
+
+static void
+iavf_dev_watchdog_disable(struct iavf_adapter *adapter)
+{
+	PMD_DRV_LOG(INFO, "Disabling device watchdog");
+
+	adapter->vf.watchdog.enabled = 0;
+	adapter->vf.watchdog.period_us = 0;
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
@@ -2423,6 +2512,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	iavf_default_rss_disable(adapter);
 
+
+	/* Start device watchdog, set polling period to 500us */
+	iavf_dev_watchdog_enable(adapter, 500);
+
+
 	return 0;
 }
 
@@ -2493,6 +2587,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
 		vf->vf_reset = false;
 
+	/* disable watchdog */
+	iavf_dev_watchdog_disable(adapter);
+
 	return ret;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v2 2/4] net/iavf: add iAVF IPsec inline crypto support
  2021-09-15 13:32   ` [dpdk-dev] [PATCH v2 2/4] net/iavf: " Radu Nicolau
@ 2021-09-18  5:28     ` Wu, Jingjing
  2021-09-20 13:44       ` Nicolau, Radu
  0 siblings, 1 reply; 128+ messages in thread
From: Wu, Jingjing @ 2021-09-18  5:28 UTC (permalink / raw)
  To: Nicolau, Radu, Xing, Beilei, Richardson, Bruce, Ananyev,
	Konstantin, Ray Kinsella
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Zhang, Qi Z

In general, the patch is too big to review. Patch split would help a lot!

[...]
> +static const struct rte_cryptodev_symmetric_capability *
> +get_capability(struct iavf_security_ctx *iavf_sctx,
> +	uint32_t algo, uint32_t type)
> +{
> +	const struct rte_cryptodev_capabilities *capability;
> +	int i = 0;
> +
> +	capability = &iavf_sctx->crypto_capabilities[i];
> +
> +	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
> +		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
> +			capability->sym.xform_type == type &&
> +			capability->sym.cipher.algo == algo)
> +			return &capability->sym;
> +		/** try next capability */
> +		capability = &iavf_crypto_capabilities[i++];

Better to check i to avoid going out of bounds.
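
For illustration, one possible bounds-checked form of the lookup; a sketch
of the suggestion only, assuming the context table mirrors the static
iavf_crypto_capabilities[] array so that RTE_DIM() gives its size:

static const struct rte_cryptodev_symmetric_capability *
get_capability(struct iavf_security_ctx *iavf_sctx,
	uint32_t algo, uint32_t type)
{
	uint32_t i;

	for (i = 0; i < RTE_DIM(iavf_crypto_capabilities); i++) {
		const struct rte_cryptodev_capabilities *capability =
				&iavf_sctx->crypto_capabilities[i];

		/* sentinel entry terminates the table early */
		if (capability->op == RTE_CRYPTO_OP_TYPE_UNDEFINED)
			break;

		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
				capability->sym.xform_type == type &&
				capability->sym.cipher.algo == algo)
			return &capability->sym;
	}

	return NULL;
}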
[...]

> +
> +static int
> +valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
> +{
> +	if (len < min || len > max)
> +		return 0;
> +
> +	if (increment == 0)
> +		return 1;
> +
> +	if ((len - min) % increment)
> +		return 0;
> +
> +	return 1;
> +}
Would it be better to use true/false instead of 1/0? The same applies to the following validation functions.
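
For illustration, the suggestion would amount to something like this sketch
(assuming stdbool.h/stdint.h are pulled in; not code from the patch):

static bool
valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
{
	if (len < min || len > max)
		return false;

	if (increment == 0)
		return true;

	/* the length must be min plus a whole number of increments */
	return ((len - min) % increment) == 0;
}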
[...]

> +static int
> +iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
> +	struct rte_security_session_conf *conf)
> +{
> +	/** validate security action/protocol selection */
> +	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
> +		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
> +		PMD_DRV_LOG(ERR, "Unsupported action / protocol specified");
> +		return -EINVAL;
> +	}
> +
> +	/** validate IPsec protocol selection */
> +	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
> +		PMD_DRV_LOG(ERR, "Unsupported IPsec protocol specified");
> +		return -EINVAL;
> +	}
> +
> +	/** validate selected options */
> +	if (conf->ipsec.options.copy_dscp ||
> +		conf->ipsec.options.copy_flabel ||
> +		conf->ipsec.options.copy_df ||
> +		conf->ipsec.options.dec_ttl ||
> +		conf->ipsec.options.ecn ||
> +		conf->ipsec.options.stats) {
> +		PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
> +		return -EINVAL;
> +	}
> +
> +	/**
> +	 * Validate crypto xforms parameters.
> +	 *
> +	 * AEAD transforms can be used for either inbound/outbound IPsec SAs,
> +	 * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
> +	 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
> +	 */
> +	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
> +		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
> +			PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
> +			return -EINVAL;
> +		}
This is an invalid parameter rather than an unsupported option, right? The same applies below.
[...]

> +static void
> +sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
> +	struct rte_crypto_aead_xform *aead, uint32_t salt)
> +{
> +	cfg->crypto_type = VIRTCHNL_AEAD;
> +
> +	switch (aead->algo) {
> +	case RTE_CRYPTO_AEAD_AES_CCM:
> +		cfg->algo_type = VIRTCHNL_AES_CCM; break;
> +	case RTE_CRYPTO_AEAD_AES_GCM:
> +		cfg->algo_type = VIRTCHNL_AES_GCM; break;
> +	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
> +		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
> +	default:
> +		RTE_ASSERT("we should be here");

Assert just because of an invalid config? Similar comments apply to the other validation functions.

> +	}
> +
> +	cfg->key_len = aead->key.length;
> +	cfg->iv_len = aead->iv.length;
> +	cfg->digest_len = aead->digest_length;
> +	cfg->salt = salt;
> +
> +	RTE_ASSERT(sizeof(cfg->key_data) < cfg->key_len);
> +
Not only the data but also the length: better to validate it before setting? The same applies to the other parameter-setting functions.
[...]


> +static inline void
> +iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
> +		struct rte_mbuf *m)
> +{
> +	uint64_t command = 0;
> +	uint64_t offset = 0;
> +	uint64_t l2tag1 = 0;
> +
> +	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
> +
> +	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
> +
> +	/* Descriptor based VLAN insertion */
> +	if (m->ol_flags & PKT_TX_VLAN_PKT) {
> +		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
> +		l2tag1 |= m->vlan_tci;
> +	}
> +
>  	/* Set MACLEN */
> -	*td_offset |= (tx_offload.l2_len >> 1) <<
> -		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
> -
> -	/* Enable L3 checksum offloads */
> -	if (ol_flags & PKT_TX_IP_CKSUM) {
> -		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
> -		*td_offset |= (tx_offload.l3_len >> 2) <<
> -			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
> -	} else if (ol_flags & PKT_TX_IPV4) {
> -		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
> -		*td_offset |= (tx_offload.l3_len >> 2) <<
> -			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
> -	} else if (ol_flags & PKT_TX_IPV6) {
> -		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
> -		*td_offset |= (tx_offload.l3_len >> 2) <<
> -			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
> -	}
> -
> -	if (ol_flags & PKT_TX_TCP_SEG) {
> -		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
> -		*td_offset |= (tx_offload.l4_len >> 2) <<
> -			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
> -		return;

The PKT_TX_TCP_SEG flag implies PKT_TX_TCP_CKSUM,
so the L4 offset cannot be removed by only checking the checksum offload fields.
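In other words, the new function would still need something like the
fragment below for the TSO path (illustrative only, reusing the local
names from the hunk above, not code from the patch):

	if (m->ol_flags & PKT_TX_TCP_SEG) {
		/* TSO implies a TCP checksum: L4T and the L4 length
		 * offset must be programmed even when PKT_TX_TCP_CKSUM
		 * is not tested separately.
		 */
		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
		offset |= (uint64_t)(m->l4_len >> 2) <<
				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
	}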
[...]


>  struct iavf_32b_rx_flex_desc_comms {
> +	union {
> +		struct {
>  	/* Qword 0 */
>  	u8 rxdid;
>  	u8 mir_id_umb_cast;
> @@ -305,6 +375,101 @@ struct iavf_32b_rx_flex_desc_comms {
>  		} flex;
>  		__le32 ts_high;
>  	} flex_ts;
> +		};
> +		struct {
> +			/* Quad Word 0 */
> +
> +			u8 rxdid;	/**< Descriptor builder profile ID */
> +
> +			u8 mirror_id:6;
> +			u8 umbcast:2;
> +
> +			__le16 ptype:10;
> +			__le16 flexi_flags_0:6;
> +
> +			__le16 packet_length:14;
> +			__le16 rsv_0:2;
> +
> +			__le16 hlen:11;
> +			__le16 sph:1;
> +			__le16 flexi_flags_1:4;
> +
> +			/* Quad Word 1 */
> +			union {
> +				__le16 status_error0;
> +				struct {
> +					__le16 status_error0_dd:1;
> +					/* descriptor done */
> +					__le16 status_error0_eop:1;
> +					/* end of packet */
> +					__le16 status_error0_hbo:1;
> +					/* header buffer overflow */
> +					__le16 status_error0_l3l4p:1;
> +					/* l3/l4 integrity check */
> +					__le16 status_error0_xsum:4;
> +					/* checksum report */
> +					__le16 status_error0_lpbk:1;
> +					/* loopback */
> +					__le16 status_error0_ipv6exadd:1;
> +					/* ipv6 w/ dst options or routing hdr */
> +					__le16 status_error0_rxe:1;
> +					/* rcv mac errors */
> +					__le16 status_error0_crcp:1;
> +					/* ethernet crc present */
> +					__le16 status_error0_rsshash:1;
> +					/* rss hash valid */
> +					__le16 status_error0_l2tag1p:1;
> +					/* l2 tag 1 present */
> +					__le16 status_error0_flexi_md0:1;
> +					/* flexi md field 0 valid */
> +					__le16 status_error0_flexi_md1:1;
> +					/* flexi md field 1 valid */
> +				};
> +			};
> +			__le16 l2tag1;
> +			__le16 flex_meta0;	/**< flexi metadata field 0 */
> +			__le16 flex_meta1;	/**< flexi metadata field 1 */
> +
> +			/* Quad Word 2 */
> +			union {
> +				__le16 status_error1;
> +				struct {
> +					__le16 status_error1_cpm:4;
> +					/* Inline IPsec Crypto Status */
> +					__le16 status_error1_udp_tunnel:1;
> +					/* UDP tunnelled packet NAT-T/UDP-NAT */
> +					__le16 status_error1_crypto:1;
> +					/* Inline IPsec Crypto Offload */
> +					__le16 status_error1_rsv:5;
> +					/* Reserved */
> +					__le16 status_error1_l2tag2p:1;
> +					/* l2 tag 2 present */
> +					__le16 status_error1_flexi_md2:1;
> +					/* flexi md field 2 valid */
> +					__le16 status_error1_flexi_md3:1;
> +					/* flexi md field 3 valid */
> +					__le16 status_error1_flexi_md4:1;
> +					/* flexi md field 4 valid */
> +					__le16 status_error1_flexi_md5:1;
> +					/* flexi md field 5 valid */
> +				};
> +			};
> +
> +			u8 flex_flags2;
> +			u8 time_stamp_low;
> +
> +			__le16 l2tag2_1st;			/**< L2TAG */
> +			__le16 l2tag2_2nd;			/**< L2TAG */
> +
> +			/* Quad Word 3 */
> +
> +			__le16 flex_meta2;	/**< flexi metadata field 2 */
> +			__le16 flex_meta3;	/**< flexi metadata field 3 */
> +			__le16 flex_meta4;	/**< flexi metadata field 4 */
> +			__le16 flex_meta5;	/**< flexi metadata field 5 */
> +
> +		} debug;
> +	};
>  };
If you check the description of this struct, you will find it is for RxDID Profile IDs 16-21.
I think you need to define a new struct for IPsec. For debug, I would also prefer a new struct
or a helper function instead of adding a union and extra fields to an already-defined descriptor format.
[....]


> +
> +#include <netinet/in.h>
> +
Put this include at the beginning of the file?
[...]

> @@ -330,18 +339,40 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
>  		case iavf_aqc_opc_send_msg_to_vf:
>  			if (msg_opc == VIRTCHNL_OP_EVENT) {
>  				iavf_handle_pf_event_msg(dev, info.msg_buf,
> -							info.msg_len);
> +						info.msg_len);
>  			} else {
> +				/* check for inline IPsec events */
> +				struct inline_ipsec_msg *imsg =
> +					(struct inline_ipsec_msg *)info.msg_buf;
> +				struct rte_eth_event_ipsec_desc desc;
> +				if (msg_opc == VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
> +					&& imsg->ipsec_opcode ==
> +						INLINE_IPSEC_OP_EVENT) {
> +					struct virtchnl_ipsec_event *ev =
> +							imsg->ipsec_data.event;
> +					desc.subtype =
> +						RTE_ETH_EVENT_IPSEC_UNKNOWN;
> +					desc.metadata = ev->ipsec_event_data;
> +					rte_eth_dev_callback_process(dev,
> +							RTE_ETH_EVENT_IPSEC,
> +							&desc);
> +					return;
> +				}
> +
>  				/* read message and it's expected one */
> -				if (msg_opc == vf->pend_cmd)
> -					_notify_cmd(vf, msg_ret);
> -				else
> -					PMD_DRV_LOG(ERR, "command mismatch,"
> -						    "expect %u, get %u",
> -						    vf->pend_cmd, msg_opc);
> +				if (msg_opc == vf->pend_cmd) {
> +					rte_atomic32_dec(&vf->pend_cmd_count);
> +					if (rte_atomic32_read(
> +						&vf->pend_cmd_count) == 0)
> +						_notify_cmd(vf, msg_ret);
Only decrementing the count here: does async mean that only the second message carries the response info?

[...]

^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v2 2/4] net/iavf: add iAVF IPsec inline crypto support
  2021-09-18  5:28     ` Wu, Jingjing
@ 2021-09-20 13:44       ` Nicolau, Radu
  0 siblings, 0 replies; 128+ messages in thread
From: Nicolau, Radu @ 2021-09-20 13:44 UTC (permalink / raw)
  To: Wu, Jingjing, Xing, Beilei, Richardson, Bruce, Ananyev,
	Konstantin, Ray Kinsella
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Zhang, Qi Z

Hi Jingjing, thanks for reviewing!


On 9/18/2021 6:28 AM, Wu, Jingjing wrote:
> In general, the patch is too big to review. Patch split would help a lot!
I will do my best to split in in the next revision.
>
> [...]
>> +static const struct rte_cryptodev_symmetric_capability *
>> +get_capability(struct iavf_security_ctx *iavf_sctx,
>> +	uint32_t algo, uint32_t type)
>> +{
>> +	const struct rte_cryptodev_capabilities *capability;
>> +	int i = 0;
>> +
>> +	capability = &iavf_sctx->crypto_capabilities[i];
>> +
>> +	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
>> +		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
>> +			capability->sym.xform_type == type &&
>> +			capability->sym.cipher.algo == algo)
>> +			return &capability->sym;
>> +		/** try next capability */
>> +		capability = &iavf_crypto_capabilities[i++];
> Better to check i to avoid going out of bounds.
The condition in the while statement, together with the last element of the
array being set to RTE_CRYPTO_OP_TYPE_UNDEFINED, prevents the loop from going
out of bounds.
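
For reference, the termination pattern being relied on is the usual
cryptodev capability-table sentinel; a minimal sketch (not the actual
table from the patch):

#include <rte_cryptodev.h>

static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
	/* ... AEAD / cipher / auth entries elided ... */
	{
		/* sentinel entry: the lookup loops stop here */
		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
	},
};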
> [...]
>
>> +
>> +static int
>> +valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
>> +{
>> +	if (len < min || len > max)
>> +		return 0;
>> +
>> +	if (increment == 0)
>> +		return 1;
>> +
>> +	if ((len - min) % increment)
>> +		return 0;
>> +
>> +	return 1;
>> +}
> Would it be better to use true/false instead of 1/0? The same applies to the following validation functions.
Will do.
> [...]
>
>> +static int
>> +iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
>> +	struct rte_security_session_conf *conf)
>> +{
>> +	/** validate security action/protocol selection */
>> +	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
>> +		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
>> +		PMD_DRV_LOG(ERR, "Unsupported action / protocol specified");
>> +		return -EINVAL;
>> +	}
>> +
>> +	/** validate IPsec protocol selection */
>> +	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
>> +		PMD_DRV_LOG(ERR, "Unsupported IPsec protocol specified");
>> +		return -EINVAL;
>> +	}
>> +
>> +	/** validate selected options */
>> +	if (conf->ipsec.options.copy_dscp ||
>> +		conf->ipsec.options.copy_flabel ||
>> +		conf->ipsec.options.copy_df ||
>> +		conf->ipsec.options.dec_ttl ||
>> +		conf->ipsec.options.ecn ||
>> +		conf->ipsec.options.stats) {
>> +		PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
>> +		return -EINVAL;
>> +	}
>> +
>> +	/**
>> +	 * Validate crypto xforms parameters.
>> +	 *
>> +	 * AEAD transforms can be used for either inbound/outbound IPsec SAs,
>> +	 * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
>> +	 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
>> +	 */
>> +	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
>> +		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
>> +			PMD_DRV_LOG(ERR, "Unsupported IPsec option specified");
>> +			return -EINVAL;
>> +		}
> Invalid parameter, but not unsupported option, right? Same as below.
I reworked the messages to be consistent.
> [...]
>
>> +static void
>> +sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
>> +	struct rte_crypto_aead_xform *aead, uint32_t salt)
>> +{
>> +	cfg->crypto_type = VIRTCHNL_AEAD;
>> +
>> +	switch (aead->algo) {
>> +	case RTE_CRYPTO_AEAD_AES_CCM:
>> +		cfg->algo_type = VIRTCHNL_AES_CCM; break;
>> +	case RTE_CRYPTO_AEAD_AES_GCM:
>> +		cfg->algo_type = VIRTCHNL_AES_GCM; break;
>> +	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
>> +		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
>> +	default:
>> +		RTE_ASSERT("we should be here");
> Assert just because of an invalid config? Similar comment applies to the other valid functions.
Removed.
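
As a sketch of the direction taken, the mapping can fail cleanly on an
unknown algorithm instead of asserting; the helper name and signature
below are mine, not the v3 code:

#include <errno.h>
#include <stdint.h>
#include <rte_crypto_sym.h>
#include "virtchnl_inline_ipsec.h"

/* Illustrative only: translate an AEAD algorithm to its virtchnl value,
 * returning an error for anything unknown rather than asserting.
 */
static int
aead_algo_to_virtchnl(enum rte_crypto_aead_algorithm algo, uint32_t *algo_type)
{
	switch (algo) {
	case RTE_CRYPTO_AEAD_AES_CCM:
		*algo_type = VIRTCHNL_AES_CCM;
		return 0;
	case RTE_CRYPTO_AEAD_AES_GCM:
		*algo_type = VIRTCHNL_AES_GCM;
		return 0;
	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
		*algo_type = VIRTCHNL_CHACHA20_POLY1305;
		return 0;
	default:
		return -EINVAL;
	}
}
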
>
>> +	}
>> +
>> +	cfg->key_len = aead->key.length;
>> +	cfg->iv_len = aead->iv.length;
>> +	cfg->digest_len = aead->digest_length;
>> +	cfg->salt = salt;
>> +
>> +	RTE_ASSERT(sizeof(cfg->key_data) < cfg->key_len);
>> +
> Not only the data but also the length; better to validate before setting? The same applies to setting the other kinds of params.
The length here is only checked to fit into the array, so it can still 
be a valid value; I moved this check into the valid_length function, 
which can actually return an error.
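
To make that concrete, a rough sketch of the kind of check I mean (the
helper name is illustrative, the struct fields are the ones from the
patch):

#include <errno.h>
#include <string.h>
#include <rte_crypto_sym.h>
#include "virtchnl_inline_ipsec.h"

/* Illustrative only: reject a key that would not fit the fixed-size
 * key_data[] buffer before any copy takes place, instead of asserting
 * after the fact.
 */
static int
sa_set_aead_key(struct virtchnl_ipsec_crypto_cfg_item *cfg,
		const struct rte_crypto_aead_xform *aead)
{
	if (aead->key.length > sizeof(cfg->key_data))
		return -EINVAL;

	cfg->key_len = aead->key.length;
	memcpy(cfg->key_data, aead->key.data, cfg->key_len);

	return 0;
}
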
> [...]
>
>
>> +static inline void
>> +iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
>> +		struct rte_mbuf *m)
>> +{
>> +	uint64_t command = 0;
>> +	uint64_t offset = 0;
>> +	uint64_t l2tag1 = 0;
>> +
>> +	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
>> +
>> +	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
>> +
>> +	/* Descriptor based VLAN insertion */
>> +	if (m->ol_flags & PKT_TX_VLAN_PKT) {
>> +		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
>> +		l2tag1 |= m->vlan_tci;
>> +	}
>> +
>>   	/* Set MACLEN */
>> -	*td_offset |= (tx_offload.l2_len >> 1) <<
>> -		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
>> -
>> -	/* Enable L3 checksum offloads */
>> -	if (ol_flags & PKT_TX_IP_CKSUM) {
>> -		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
>> -		*td_offset |= (tx_offload.l3_len >> 2) <<
>> -			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
>> -	} else if (ol_flags & PKT_TX_IPV4) {
>> -		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
>> -		*td_offset |= (tx_offload.l3_len >> 2) <<
>> -			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
>> -	} else if (ol_flags & PKT_TX_IPV6) {
>> -		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
>> -		*td_offset |= (tx_offload.l3_len >> 2) <<
>> -			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
>> -	}
>> -
>> -	if (ol_flags & PKT_TX_TCP_SEG) {
>> -		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
>> -		*td_offset |= (tx_offload.l4_len >> 2) <<
>> -			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
>> -		return;
> The PKT_TX_TCP_SEG flag implies PKT_TX_TCP_CKSUM,
> so the offset cannot be removed by only checking the checksum offload fields.
I added this back; most likely it was a rebase mistake.
> [...]
>
>
>>   struct iavf_32b_rx_flex_desc_comms {
>> +	union {
>> +		struct {
>>   	/* Qword 0 */
>>   	u8 rxdid;
>>   	u8 mir_id_umb_cast;
>> @@ -305,6 +375,101 @@ struct iavf_32b_rx_flex_desc_comms {
>>   		} flex;
>>   		__le32 ts_high;
>>   	} flex_ts;
>> +		};
>> +		struct {
>> +			/* Quad Word 0 */
>> +
>> +			u8 rxdid;	/**< Descriptor builder profile ID */
>> +
>> +			u8 mirror_id:6;
>> +			u8 umbcast:2;
>> +
>> +			__le16 ptype:10;
>> +			__le16 flexi_flags_0:6;
>> +
>> +			__le16 packet_length:14;
>> +			__le16 rsv_0:2;
>> +
>> +			__le16 hlen:11;
>> +			__le16 sph:1;
>> +			__le16 flexi_flags_1:4;
>> +
>> +			/* Quad Word 1 */
>> +			union {
>> +				__le16 status_error0;
>> +				struct {
>> +					__le16 status_error0_dd:1;
>> +					/* descriptor done */
>> +					__le16 status_error0_eop:1;
>> +					/* end of packet */
>> +					__le16 status_error0_hbo:1;
>> +					/* header buffer overflow */
>> +					__le16 status_error0_l3l4p:1;
>> +					/* l3/l4 integrity check */
>> +					__le16 status_error0_xsum:4;
>> +					/* checksum report */
>> +					__le16 status_error0_lpbk:1;
>> +					/* loopback */
>> +					__le16 status_error0_ipv6exadd:1;
>> +					/* ipv6 w/ dst options or routing hdr */
>> +					__le16 status_error0_rxe:1;
>> +					/* rcv mac errors */
>> +					__le16 status_error0_crcp:1;
>> +					/* ethernet crc present */
>> +					__le16 status_error0_rsshash:1;
>> +					/* rss hash valid */
>> +					__le16 status_error0_l2tag1p:1;
>> +					/* l2 tag 1 present */
>> +					__le16 status_error0_flexi_md0:1;
>> +					/* flexi md field 0 valid */
>> +					__le16 status_error0_flexi_md1:1;
>> +					/* flexi md field 1 valid */
>> +				};
>> +			};
>> +			__le16 l2tag1;
>> +			__le16 flex_meta0;	/**< flexi metadata field 0 */
>> +			__le16 flex_meta1;	/**< flexi metadata field 1 */
>> +
>> +			/* Quad Word 2 */
>> +			union {
>> +				__le16 status_error1;
>> +				struct {
>> +					__le16 status_error1_cpm:4;
>> +					/* Inline IPsec Crypto Status */
>> +					__le16 status_error1_udp_tunnel:1;
>> +					/* UDP tunnelled packet NAT-T/UDP-NAT */
>> +					__le16 status_error1_crypto:1;
>> +					/* Inline IPsec Crypto Offload */
>> +					__le16 status_error1_rsv:5;
>> +					/* Reserved */
>> +					__le16 status_error1_l2tag2p:1;
>> +					/* l2 tag 2 present */
>> +					__le16 status_error1_flexi_md2:1;
>> +					/* flexi md field 2 valid */
>> +					__le16 status_error1_flexi_md3:1;
>> +					/* flexi md field 3 valid */
>> +					__le16 status_error1_flexi_md4:1;
>> +					/* flexi md field 4 valid */
>> +					__le16 status_error1_flexi_md5:1;
>> +					/* flexi md field 5 valid */
>> +				};
>> +			};
>> +
>> +			u8 flex_flags2;
>> +			u8 time_stamp_low;
>> +
>> +			__le16 l2tag2_1st;			/**< L2TAG */
>> +			__le16 l2tag2_2nd;			/**< L2TAG */
>> +
>> +			/* Quad Word 3 */
>> +
>> +			__le16 flex_meta2;	/**< flexi metadata field 2 */
>> +			__le16 flex_meta3;	/**< flexi metadata field 3 */
>> +			__le16 flex_meta4;	/**< flexi metadata field 4 */
>> +			__le16 flex_meta5;	/**< flexi metadata field 5 */
>> +
>> +		} debug;
>> +	};
>>   };
> If you check the description of this struct, you will find it is for RxDID Profile ID 16-21.
> I think you need to define a new struct for IPsec. And for debug, a new struct
> or some function is preferable to adding a union and fields to an already defined descriptor format.
I removed this altogether as it has no functional purpose.
> [....]
>
>
>> +
>> +#include <netinet/in.h>
>> +
> Put this include at the beginning of the file?
I removed this section.
> [...]
>
>> @@ -330,18 +339,40 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
>>   		case iavf_aqc_opc_send_msg_to_vf:
>>   			if (msg_opc == VIRTCHNL_OP_EVENT) {
>>   				iavf_handle_pf_event_msg(dev, info.msg_buf,
>> -							info.msg_len);
>> +						info.msg_len);
>>   			} else {
>> +				/* check for inline IPsec events */
>> +				struct inline_ipsec_msg *imsg =
>> +					(struct inline_ipsec_msg *)info.msg_buf;
>> +				struct rte_eth_event_ipsec_desc desc;
>> +				if (msg_opc == VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
>> +					&& imsg->ipsec_opcode ==
>> +						INLINE_IPSEC_OP_EVENT) {
>> +					struct virtchnl_ipsec_event *ev =
>> +							imsg->ipsec_data.event;
>> +					desc.subtype =
>> +						RTE_ETH_EVENT_IPSEC_UNKNOWN;
>> +					desc.metadata = ev->ipsec_event_data;
>> +					rte_eth_dev_callback_process(dev,
>> +							RTE_ETH_EVENT_IPSEC,
>> +							&desc);
>> +					return;
>> +				}
>> +
>>   				/* read message and it's expected one */
>> -				if (msg_opc == vf->pend_cmd)
>> -					_notify_cmd(vf, msg_ret);
>> -				else
>> -					PMD_DRV_LOG(ERR, "command mismatch,"
>> -						    "expect %u, get %u",
>> -						    vf->pend_cmd, msg_opc);
>> +				if (msg_opc == vf->pend_cmd) {
>> +					rte_atomic32_dec(&vf->pend_cmd_count);
>> +					if (rte_atomic32_read(
>> +						&vf->pend_cmd_count) == 0)
>> +						_notify_cmd(vf, msg_ret);
> Only dec the count, does the async mean only the second message carries response info?
Yes, from my understanding the first message is the confirmation of the 
request and the second is the actual reply.
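
For what it is worth, the counting scheme can be sketched generically
like this (plain C11 atomics rather than the driver's rte_atomic code;
the names are illustrative):

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative only: a command that expects N reply messages arms a
 * counter before sending; the receive path decrements it for every
 * matching message and signals the waiter only on the last one.
 */
struct pending_cmd {
	uint32_t opcode;	/* expected virtchnl opcode */
	atomic_int replies_left;
};

static void
arm_command(struct pending_cmd *cmd, uint32_t opcode, int n_replies)
{
	cmd->opcode = opcode;
	atomic_store(&cmd->replies_left, n_replies);	/* 2 for async ops */
}

/* Returns 1 when the waiter should be notified, 0 otherwise. */
static int
on_reply(struct pending_cmd *cmd, uint32_t msg_opc)
{
	if (msg_opc != cmd->opcode)
		return 0;

	return atomic_fetch_sub(&cmd->replies_left, 1) == 1;
}
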
>
> [...]

^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v3 0/6] iavf: add iAVF IPsec inline crypto support
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (4 preceding siblings ...)
  2021-09-15 13:32 ` [dpdk-dev] [PATCH v2 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-09-20 13:51 ` Radu Nicolau
  2021-09-20 13:51   ` [dpdk-dev] [PATCH v3 1/6] common/iavf: " Radu Nicolau
                     ` (5 more replies)
  2021-10-01  9:51 ` [dpdk-dev] [PATCH v4 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (10 subsequent siblings)
  16 siblings, 6 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-20 13:51 UTC (permalink / raw)
  Cc: dev, declan.doherty, abhijit.sinha, jingjing.wu, qi.z.zhang,
	beilei.xing, bruce.richardson, konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload for
ESP over UDP, and in conjunction with TSO for UDP and TCP flows.

Radu Nicolau (6):
  common/iavf: add iAVF IPsec inline crypto support
  net/iavf: rework tx path
  net/iavf: add support for asynchronous virt channel messages
  net/iavf: add iAVF IPsec inline crypto support
  net/iavf: add xstats support for inline IPsec crypto
  net/iavf: add watchdog for VFLR

 drivers/common/iavf/iavf_type.h               |  215 +-
 drivers/common/iavf/virtchnl.h                |   17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
 drivers/net/iavf/iavf.h                       |   53 +-
 drivers/net/iavf/iavf_ethdev.c                |  222 +-
 drivers/net/iavf/iavf_generic_flow.c          |   16 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1918 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |   96 +
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  709 ++++--
 drivers/net/iavf/iavf_rxtx.h                  |   91 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  166 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 17 files changed, 4137 insertions(+), 321 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

-- 
v2: small updates and fixes in the flow related section
v3: split the huge patch and address feedback

2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v3 1/6] common/iavf: add iAVF IPsec inline crypto support
  2021-09-20 13:51 ` [dpdk-dev] [PATCH v3 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-09-20 13:51   ` Radu Nicolau
  2021-09-20 13:51   ` [dpdk-dev] [PATCH v3 2/6] net/iavf: rework tx path Radu Nicolau
                     ` (4 subsequent siblings)
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-20 13:51 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/common/iavf/iavf_type.h             | 215 +++++++-
 drivers/common/iavf/virtchnl.h              |  17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h | 553 ++++++++++++++++++++
 3 files changed, 775 insertions(+), 10 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 73dfb47e70..1f8f8ae5fd 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -709,11 +709,29 @@ enum iavf_rx_prog_status_desc_error_bits {
 #define IAVF_FOUR_BIT_MASK	0xF
 #define IAVF_EIGHTEEN_BIT_MASK	0x3FFFF
 
-/* TX Descriptor */
+/* TX Data Descriptor */
 struct iavf_tx_desc {
-	__le64 buffer_addr; /* Address of descriptor's data buf */
-	__le64 cmd_type_offset_bsz;
-};
+	union {
+		struct {
+			__le64 buffer_addr; /* Addr of descriptor's data buf */
+			__le64 cmd_type_offset_bsz;
+		};
+		struct {
+			__le64 qw0; /**< data buffer address */
+			__le64 qw1; /**< dtyp, cmd, offset, buf_sz and l2tag1 */
+		};
+		struct {
+			__le64 buffer_addr;	/**< Data buffer address */
+			__le64 type:4;		/**< Descriptor type */
+			__le64 cmd:12;		/**< Command field */
+			__le64 offset_l2len:7;	/**< L2 header length */
+			__le64 offset_l3len:7;	/**< L3 header length */
+			__le64 offset_l4len:4;	/**< L4 header length */
+			__le64 buffer_sz:14;	/**< Data buffer size */
+			__le64 l2tag1:16;	/**< L2 Tag 1 value */
+		} debug __rte_packed;
+	};
+} __rte_packed;
 
 #define IAVF_TXD_QW1_DTYPE_SHIFT	0
 #define IAVF_TXD_QW1_DTYPE_MASK		(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
@@ -723,6 +741,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
@@ -734,7 +753,7 @@ enum iavf_tx_desc_dtype_value {
 #define IAVF_TXD_QW1_CMD_SHIFT	4
 #define IAVF_TXD_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_QW1_CMD_SHIFT)
 
-enum iavf_tx_desc_cmd_bits {
+enum iavf_tx_data_desc_cmd_bits {
 	IAVF_TX_DESC_CMD_EOP			= 0x0001,
 	IAVF_TX_DESC_CMD_RS			= 0x0002,
 	IAVF_TX_DESC_CMD_ICRC			= 0x0004,
@@ -778,18 +797,79 @@ enum iavf_tx_desc_length_fields {
 #define IAVF_TXD_QW1_L2TAG1_SHIFT	48
 #define IAVF_TXD_QW1_L2TAG1_MASK	(0xFFFFULL << IAVF_TXD_QW1_L2TAG1_SHIFT)
 
+#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
+#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
+#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
+#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
+	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
+	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT		(48)
+#define IAVF_TXD_DATA_QW1_L2TAG1_MASK		\
+	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
+
 /* Context descriptors */
 struct iavf_tx_context_desc {
+	union {
+		struct {
 	__le32 tunneling_params;
 	__le16 l2tag2;
 	__le16 rsvd;
 	__le64 type_cmd_tso_mss;
 };
-
-#define IAVF_TXD_CTX_QW1_DTYPE_SHIFT	0
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le32 tunneling;
+			__le16 l2tag2;
+			__le16 rsvd0;
+			__le64 type:4;
+			__le64 cmd:7;
+			__le64 ipsec:7;
+			__le64 rsvd1:12;
+			__le64 tlen_tsyn:18;
+			__le64 rsvd2:2;
+			__le64 mss_target_vsi:14;
+		} debug __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_TXD_CTX_QW1_DTYPE_SHIFT	(0)
 #define IAVF_TXD_CTX_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_CTX_QW1_DTYPE_SHIFT)
 
-#define IAVF_TXD_CTX_QW1_CMD_SHIFT	4
+#define IAVF_TXD_CTX_QW1_CMD_SHIFT	(4)
 #define IAVF_TXD_CTX_QW1_CMD_MASK	(0xFFFFUL << IAVF_TXD_CTX_QW1_CMD_SHIFT)
 
 enum iavf_tx_ctx_desc_cmd_bits {
@@ -804,6 +884,63 @@ enum iavf_tx_ctx_desc_cmd_bits {
 	IAVF_TX_CTX_DESC_SWPE		= 0x40
 };
 
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
+	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
+	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT		(30)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK		\
+	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK		\
+	(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT		(50)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK		\
+	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT		(0)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_external_ip_type {
+	IAVF_TX_CTX_DESC_EIPT_NONE,
+	IAVF_TX_CTX_DESC_EIPT_IPV6,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM			(32)
+#define IAVF_TXD_CTX_QW0_L2TAG2_MASK			(0xFFFFUL)
+
 struct iavf_nop_desc {
 	__le64 rsvd;
 	__le64 dtype_cmd;
@@ -911,6 +1048,68 @@ enum iavf_tx_ctx_desc_eipt_offload {
 #define IAVF_TXD_CTX_QW0_L4T_CS_SHIFT	23
 #define IAVF_TXD_CTX_QW0_L4T_CS_MASK	BIT_ULL(IAVF_TXD_CTX_QW0_L4T_CS_SHIFT)
 
+
+struct iavf_tx_ipsec_desc {
+	union {
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le16 l4payload_length;
+			__le32 esn;
+			__le16 trailer_length;
+			u8 type:4;
+			u8 rsv:1;
+			u8 udp:1;
+			u8 ivlen:2;
+			u8 next_header;
+			__le16 ipv6_ext_hdr_length;
+			__le32 said;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
+			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
+
+/* Initialization Vector Length type */
+enum iavf_ipsec_iv_len {
+	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
+	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
+	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
+	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
+};
+
 /* Statistics collected by each port, VSI, VEB, and S-channel */
 struct iavf_eth_stats {
 	u64 rx_bytes;			/* gorc */
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 83f51d889f..5cc326c035 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -38,6 +38,8 @@
  * value in current and future projects
  */
 
+#include "virtchnl_inline_ipsec.h"
+
 /* Error Codes */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
@@ -133,7 +135,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, and 37 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
 	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
 	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
@@ -225,6 +228,8 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 		return "VIRTCHNL_OP_DCF_CMD_DESC";
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
@@ -385,7 +390,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
-	/* BIT(8) is reserved */
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
@@ -2290,6 +2295,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..1e9134501e
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all valid, if certain field is invalid, set 1 for all bits */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* vf record the capability of crypto from the virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF pass virtchnl_ipsec_cap to PF
+ * and PF return capability of ipsec from virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF send this SA configuration to PF using virtchnl;
+ * PF create SA as configuration and PF driver will return
+ * an unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-reply window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF send configuration of index of SA to PF
+ * PF will update SA according to configuration
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF send configuration of index of SA to PF
+ * PF will destroy SA according to configuration
+ * flag bitmap indicate all SA or just selected SA will
+ * be destroyed
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF send this SA configuration to PF using virtchnl;
+ * PF read SA and will return configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v3 2/6] net/iavf: rework tx path
  2021-09-20 13:51 ` [dpdk-dev] [PATCH v3 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-09-20 13:51   ` [dpdk-dev] [PATCH v3 1/6] common/iavf: " Radu Nicolau
@ 2021-09-20 13:51   ` Radu Nicolau
  2021-09-20 13:51   ` [dpdk-dev] [PATCH v3 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-20 13:51 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Rework the TX path and TX descriptor usage in order to
allow for better use of offload flags and to facilitate enabling of
the inline crypto offload feature.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c         | 536 +++++++++++++++------------
 drivers/net/iavf/iavf_rxtx.h         |   9 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c |  10 +-
 3 files changed, 319 insertions(+), 236 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 6de8ad3fe3..a84a0b07f6 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1048,27 +1048,31 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 
 static inline void
 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
-			  volatile union iavf_rx_flex_desc *rxdp,
-			  uint8_t rx_flags)
+			  volatile union iavf_rx_flex_desc *rxdp)
 {
-	uint16_t vlan_tci = 0;
-
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
-	    rte_le_to_cpu_64(rxdp->wb.status_error0) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+		(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+		mb->vlan_tci =
+			rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	} else {
+		mb->vlan_tci = 0;
+	}
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
-	    rte_le_to_cpu_16(rxdp->wb.status_error1) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
-#endif
-
-	if (vlan_tci) {
-		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		mb->vlan_tci = vlan_tci;
+	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
+		mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+				PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+		mb->vlan_tci_outer = mb->vlan_tci;
+		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
+	} else {
+		mb->vlan_tci_outer = 0;
 	}
+#endif
 }
 
 /* Translate the rx descriptor status and error fields to pkt flags */
@@ -1388,7 +1392,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->ol_flags = 0;
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1530,7 +1534,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = 0;
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1768,7 +1772,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
-			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
+			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2038,7 +2042,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
 
 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-	if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
+	if ((txd[desc_to_clean_to].qw1 &
 			rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
 			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
 		PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
@@ -2054,7 +2058,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
 					last_desc_cleaned);
 
-	txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
+	txd[desc_to_clean_to].qw1 = 0;
 
 	txq->last_desc_cleaned = desc_to_clean_to;
 	txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
@@ -2062,190 +2066,296 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 	return 0;
 }
 
-/* Check if the context descriptor is needed for TX offloading */
+
+
+static inline void
+iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
+{
+	uint64_t cmd = 0;
+
+	/* TSO enabled */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;
+
+	/* Time Sync - Currently not supported */
+
+	/* Outer L2 TAG 2 Insertion - Currently not supported */
+	/* Inner L2 TAG 2 Insertion - Currently not supported */
+
+	*field |= cmd;
+}
+
+static inline void
+iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
+		const struct rte_mbuf *m)
+{
+	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
+	uint64_t eip_len = 0;
+	uint64_t eip_noinc = 0;
+	/* Default - IP_ID is increment in each segment of LSO */
+
+	switch (m->ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6 |
+			PKT_TX_OUTER_IP_CKSUM)) {
+	case PKT_TX_OUTER_IPV4:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV6:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	}
+
+	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
+		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
+		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
+}
+
 static inline uint16_t
-iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
+iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
+	struct rte_mbuf *m)
 {
-	if (flags & PKT_TX_TCP_SEG)
-		return 1;
-	if (flags & PKT_TX_VLAN_PKT &&
-	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		return 1;
-	return 0;
+	uint64_t segmentation_field = 0;
+	uint64_t total_length = 0;
+
+	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		total_length -= m->outer_l3_len;
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+	if (!m->l4_len || !m->tso_segsz)
+		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
+			 m->l4_len, m->tso_segsz);
+	if (m->tso_segsz < 88)
+		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
+			m->tso_segsz, 88);
+#endif
+	segmentation_field =
+		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
+				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
+		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
+				IAVF_TXD_CTX_QW1_MSS_MASK);
+
+	*field |= segmentation_field;
+
+	return total_length;
+}
+
+static inline void
+iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
+	struct rte_mbuf *m, uint16_t *tlen)
+{
+	/* fill descriptor type field */
+	desc->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
+
+	/* fill command field */
+	iavf_fill_ctx_desc_cmd_field(&desc->qw1, m);
+
+	/* fill segmentation field */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc->qw1,
+				m);
+	}
+
+	/* fill tunnelling field */
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field(&desc->qw0, m);
+	else
+		desc->qw0 = 0;
+
+	desc->qw0 = rte_cpu_to_le_64(desc->qw0);
+	desc->qw1 = rte_cpu_to_le_64(desc->qw1);
 }
 
+
 static inline void
-iavf_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union iavf_tx_offload tx_offload)
+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
+		struct rte_mbuf *m)
 {
+	uint64_t command = 0;
+	uint64_t offset = 0;
+	uint64_t l2tag1 = 0;
+
+	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
+
+	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
+
+	/* Descriptor based VLAN insertion */
+	if (m->ol_flags & PKT_TX_VLAN_PKT) {
+		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
+		l2tag1 |= m->vlan_tci;
+	}
+
 	/* Set MACLEN */
-	*td_offset |= (tx_offload.l2_len >> 1) <<
-		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & PKT_TX_IP_CKSUM) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV4) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV6) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	}
-
-	if (ol_flags & PKT_TX_TCP_SEG) {
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
+	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L3 checksum offloading inner */
+	if (m->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV4) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV6) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	}
+
+	if (m->ol_flags & PKT_TX_TCP_SEG) {
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (m->l4_len >> 2) <<
 			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		return;
 	}
 
 	/* Enable L4 checksum offloads */
-	switch (ol_flags & PKT_TX_L4_MASK) {
+	switch (m->ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_TCP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_SCTP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_UDP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	}
+
+	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
+		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
+		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
+		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
+		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
-/* set TSO context descriptor
- * support IP -> L4 and IP -> IP -> L4
- */
-static inline uint64_t
-iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
+static inline void
+iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t value)
 {
-	uint64_t ctx_desc = 0;
-	uint32_t cd_cmd, hdr_len, cd_tso_len;
-
-	if (!tx_offload.l4_len) {
-		PMD_TX_LOG(DEBUG, "L4 length set to 0");
-		return ctx_desc;
+	*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 	}
 
-	hdr_len = tx_offload.l2_len +
-		  tx_offload.l3_len +
-		  tx_offload.l4_len;
+static inline void
+iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
+	struct rte_mbuf *m, uint64_t desc_template,
+	uint16_t tlen, uint16_t ipseclen)
+{
+	uint32_t hdrlen = m->l2_len;
+	uint32_t bufsz = 0;
+
+	/* fill data descriptor qw1 from template */
+	desc->qw1 = desc_template;
+
+	/* set data buffer address */
+	desc->qw0 = rte_mbuf_data_iova(m);
+
+	/* calculate data buffer size less set header lengths */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			hdrlen += m->outer_l3_len;
+
+		if (m->ol_flags & PKT_TX_L4_MASK)
+			hdrlen += m->l3_len + m->l4_len;
+		else
+			hdrlen += m->l3_len;
 
-	cd_cmd = IAVF_TX_CTX_DESC_TSO;
-	cd_tso_len = mbuf->pkt_len - hdr_len;
-	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
-		     ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-		     ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			hdrlen += ipseclen;
 
-	return ctx_desc;
+		bufsz = hdrlen + tlen;
+	} else {
+		bufsz = m->data_len;
 }
 
-/* Construct the tx flags */
-static inline uint64_t
-iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
-	       uint32_t td_tag)
-{
-	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
-				((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
-				((uint64_t)td_offset <<
-				 IAVF_TXD_QW1_OFFSET_SHIFT) |
-				((uint64_t)size  <<
-				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
-				((uint64_t)td_tag  <<
-				 IAVF_TXD_QW1_L2TAG1_SHIFT));
+	/* set data buffer size */
+	desc->qw1 |= (((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
+
+	desc->qw0 = rte_cpu_to_le_64(desc->qw0);
+	desc->qw1 = rte_cpu_to_le_64(desc->qw1);
 }
 
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	volatile struct iavf_tx_desc *txd;
-	volatile struct iavf_tx_desc *txr;
-	struct iavf_tx_queue *txq;
-	struct iavf_tx_entry *sw_ring;
+	struct iavf_tx_queue *txq = tx_queue;
+	volatile struct iavf_tx_desc *txr = txq->tx_ring;
+	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
-	struct rte_mbuf *tx_pkt;
-	struct rte_mbuf *m_seg;
-	uint16_t tx_id;
-	uint16_t nb_tx;
-	uint32_t td_cmd;
-	uint32_t td_offset;
-	uint32_t td_tag;
-	uint64_t ol_flags;
-	uint16_t nb_used;
-	uint16_t nb_ctx;
-	uint16_t tx_last;
-	uint16_t slen;
-	uint64_t buf_dma_addr;
-	uint16_t cd_l2tag2 = 0;
-	union iavf_tx_offload tx_offload = {0};
-
-	txq = tx_queue;
-	sw_ring = txq->sw_ring;
-	txr = txq->tx_ring;
-	tx_id = txq->tx_tail;
-	txe = &sw_ring[tx_id];
+	struct rte_mbuf *mb, *mb_seg;
+	uint16_t desc_idx, desc_idx_last;
+	uint16_t idx;
+
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_free < txq->free_thresh)
-		(void)iavf_xmit_cleanup(txq);
+		iavf_xmit_cleanup(txq);
+
+	desc_idx = txq->tx_tail;
+	txe = &txe_ring[desc_idx];
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+		iavf_dump_tx_entry_ring(txq);
+		iavf_dump_tx_desc_ring(txq);
+#endif
 
-	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-		td_cmd = 0;
-		td_tag = 0;
-		td_offset = 0;
 
-		tx_pkt = *tx_pkts++;
+	for (idx = 0; idx < nb_pkts; idx++) {
+		volatile struct iavf_tx_desc *ddesc;
+		uint16_t nb_desc_ctx;
+		uint16_t nb_desc_data, nb_desc_required;
+		uint16_t tlen = 0, ipseclen = 0;
+		uint64_t ddesc_template = 0;
+		uint64_t ddesc_cmd = 0;
+
+		mb = tx_pkts[idx];
+
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
-		ol_flags = tx_pkt->ol_flags;
-		tx_offload.l2_len = tx_pkt->l2_len;
-		tx_offload.l3_len = tx_pkt->l3_len;
-		tx_offload.l4_len = tx_pkt->l4_len;
-		tx_offload.tso_segsz = tx_pkt->tso_segsz;
-		/* Calculate the number of context descriptors needed. */
-		nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
+		nb_desc_data = mb->nb_segs;
+		nb_desc_ctx = !!(mb->ol_flags &
+			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
 
-		/* The number of descriptors that must be allocated for
+		/**
+		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
-		 * packet plus 1 context descriptor if needed.
+		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
-		tx_last = (uint16_t)(tx_id + nb_used - 1);
+		nb_desc_required = nb_desc_data + nb_desc_ctx;
+
+		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
-		/* Circular ring */
-		if (tx_last >= txq->nb_tx_desc)
-			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+		/* wrap descriptor ring */
+		if (desc_idx_last >= txq->nb_tx_desc)
+			desc_idx_last =
+				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
 
-		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
-			   " tx_first=%u tx_last=%u",
-			   txq->port_id, txq->queue_id, tx_id, tx_last);
+		PMD_TX_LOG(DEBUG,
+			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
+			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
 
-		if (nb_used > txq->nb_free) {
+		if (nb_desc_required > txq->nb_free) {
 			if (iavf_xmit_cleanup(txq)) {
-				if (nb_tx == 0)
+				if (idx == 0)
 					return 0;
 				goto end_of_tx;
 			}
-			if (unlikely(nb_used > txq->rs_thresh)) {
-				while (nb_used > txq->nb_free) {
+			if (unlikely(nb_desc_required > txq->rs_thresh)) {
+				while (nb_desc_required > txq->nb_free) {
 					if (iavf_xmit_cleanup(txq)) {
-						if (nb_tx == 0)
+						if (idx == 0)
 							return 0;
 						goto end_of_tx;
 					}
@@ -2253,122 +2363,94 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 		}
 
-		/* Descriptor based VLAN insertion */
-		if (ol_flags & PKT_TX_VLAN_PKT &&
-		    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
-			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
-			td_tag = tx_pkt->vlan_tci;
-		}
-
-		/* According to datasheet, the bit2 is reserved and must be
-		 * set to 1.
-		 */
-		td_cmd |= 0x04;
-
-		/* Enable checksum offloading */
-		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
-			iavf_txd_enable_checksum(ol_flags, &td_cmd,
-						&td_offset, tx_offload);
+		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
 
-		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
-			uint64_t cd_type_cmd_tso_mss =
-				IAVF_TX_DESC_DTYPE_CONTEXT;
-			volatile struct iavf_tx_context_desc *ctx_txd =
+		if (nb_desc_ctx) {
+			volatile struct iavf_tx_context_desc *ctx_desc =
 				(volatile struct iavf_tx_context_desc *)
-							&txr[tx_id];
+					&txr[desc_idx];
 
 			/* clear QW0 or the previous writeback value
 			 * may impact next write
 			 */
-			*(volatile uint64_t *)ctx_txd = 0;
+			*(volatile uint64_t *)ctx_desc = 0;
 
-			txn = &sw_ring[txe->next_id];
+			txn = &txe_ring[txe->next_id];
 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
 			if (txe->mbuf) {
 				rte_pktmbuf_free_seg(txe->mbuf);
 				txe->mbuf = NULL;
 			}
 
-			/* TSO enabled */
-			if (ol_flags & PKT_TX_TCP_SEG)
-				cd_type_cmd_tso_mss |=
-					iavf_set_tso_ctx(tx_pkt, tx_offload);
+			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
-			if (ol_flags & PKT_TX_VLAN_PKT &&
-			   txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
-				cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
-					<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-				cd_l2tag2 = tx_pkt->vlan_tci;
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
 			}
 
-			ctx_txd->type_cmd_tso_mss =
-				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
-			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
 
-			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
-		}
+
+		mb_seg = mb;
 
-		m_seg = tx_pkt;
 		do {
-			txd = &txr[tx_id];
-			txn = &sw_ring[txe->next_id];
+			ddesc = (volatile struct iavf_tx_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
 			if (txe->mbuf)
 				rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = m_seg;
-
-			/* Setup TX Descriptor */
-			slen = m_seg->data_len;
-			buf_dma_addr = rte_mbuf_data_iova(m_seg);
-			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
-			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
-								  td_offset,
-								  slen,
-								  td_tag);
-
-			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
+
+			txe->mbuf = mb_seg;
+			iavf_fill_data_desc(ddesc, mb_seg,
+					ddesc_template, tlen, ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->next;
-		} while (m_seg);
+			mb_seg = mb_seg->next;
+		} while (mb_seg);
 
 		/* The last packet data descriptor needs End Of Packet (EOP) */
-		td_cmd |= IAVF_TX_DESC_CMD_EOP;
-		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
-		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
+
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
 
 		if (txq->nb_used >= txq->rs_thresh) {
 			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
 				   "%4u (port=%d queue=%d)",
-				   tx_last, txq->port_id, txq->queue_id);
+				   desc_idx_last, txq->port_id, txq->queue_id);
 
-			td_cmd |= IAVF_TX_DESC_CMD_RS;
+			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
 
 			/* Update txq RS bit counters */
 			txq->nb_used = 0;
 		}
 
-		txd->cmd_type_offset_bsz |=
-			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
-					 IAVF_TXD_QW1_CMD_SHIFT);
-		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+		ddesc->qw1 |= rte_cpu_to_le_64(ddesc_cmd <<
+				IAVF_TXD_DATA_QW1_CMD_SHIFT);
+
+		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
 	}
 
 end_of_tx:
 	rte_wmb();
 
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   txq->port_id, txq->queue_id, tx_id, nb_tx);
+		   txq->port_id, txq->queue_id, desc_idx, idx);
 
-	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
-	txq->tx_tail = tx_id;
+	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
+	txq->tx_tail = desc_idx;
 
-	return nb_tx;
+	return idx;
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
@@ -2869,7 +2951,7 @@ iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
 			desc -= txq->nb_tx_desc;
 	}
 
-	status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+	status = &txq->tx_ring[desc].qw1;
 	mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
 	expect = rte_cpu_to_le_64(
 		 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index e210b913d6..1bc47614ea 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -555,9 +555,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	const volatile struct iavf_tx_desc *tx_desc = desc;
 	enum iavf_tx_desc_dtype_value type;
 
-	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
-		tx_desc->cmd_type_offset_bsz &
-		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+
+	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(tx_desc->qw1 &
+			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
 	switch (type) {
 	case IAVF_TX_DESC_DTYPE_DATA:
 		name = "Tx_data_desc";
@@ -571,8 +571,7 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	}
 
 	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
-	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
-	       tx_desc->cmd_type_offset_bsz);
+		txq->queue_id, name, tx_id, tx_desc->qw0, tx_desc->qw1);
 }
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index ee1e905525..288c5ca1f1 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -363,10 +363,12 @@ static inline void
 flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
 		     const uint32_t *type_table)
 {
-	const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+	const __m128i ptype_mask = _mm_set_epi16(
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
+
 	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
 	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
 	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v3 3/6] net/iavf: add support for asynchronous virt channel messages
  2021-09-20 13:51 ` [dpdk-dev] [PATCH v3 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-09-20 13:51   ` [dpdk-dev] [PATCH v3 1/6] common/iavf: " Radu Nicolau
  2021-09-20 13:51   ` [dpdk-dev] [PATCH v3 2/6] net/iavf: rework tx path Radu Nicolau
@ 2021-09-20 13:51   ` Radu Nicolau
  2021-09-20 13:52   ` [dpdk-dev] [PATCH v3 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-20 13:51 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for asynchronous virtual channel messages, specifically for
inline IPsec messages.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h       |  16 ++++
 drivers/net/iavf/iavf_vchnl.c | 137 +++++++++++++++++++++-------------
 2 files changed, 101 insertions(+), 52 deletions(-)
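
Below is a minimal sketch, not part of the patch, of how a caller could
drive the two-stage completion added here: _atomic_set_async_response_cmd()
arms pend_cmd_count with 2, the synchronous adminq acknowledgement
decrements it once, and the asynchronous inline IPsec response decrements
it again before _notify_cmd() releases the waiter. It assumes the helpers
and constants from this patch (iavf_vchnl.c context); the function name is
hypothetical and error handling is trimmed.

/* Illustrative only: hypothetical helper showing the intended use of the
 * asynchronous command path; relies on MAX_TRY_TIMES/ASQ_DELAY_MS below.
 */
static int
iavf_send_async_vc_request_sketch(struct iavf_adapter *adapter,
				  struct iavf_cmd_info *args)
{
	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	int i;

	/* arm pend_cmd and set pend_cmd_count to 2: one count for the
	 * adminq acknowledgement, one for the asynchronous response
	 */
	if (_atomic_set_async_response_cmd(vf, args->ops))
		return -1;

	if (iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
				   args->in_args, args->in_args_size, NULL))
		return -EIO;

	/* poll until the interrupt handler has seen both messages;
	 * _notify_cmd() clears pend_cmd once pend_cmd_count reaches zero
	 */
	for (i = 0; i < MAX_TRY_TIMES; i++) {
		if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN)
			return vf->cmd_retval;
		rte_delay_ms(ASQ_DELAY_MS);
	}

	return -ETIMEDOUT;
}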

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index b3bd078111..8c7f7c0bed 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -189,6 +189,7 @@ struct iavf_info {
 	uint64_t supported_rxdid;
 	uint8_t *proto_xtr; /* proto xtr type for all queues */
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+	rte_atomic32_t pend_cmd_count;
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
@@ -340,9 +341,24 @@ _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
+	rte_atomic32_set(&vf->pend_cmd_count, 1);
+
 	return !ret;
 }
 
+/* Check if a cmd is pending; if none, set the new command and expect an async response. */
+static inline int
+_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+	int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+	if (!ret)
+		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+	rte_atomic32_set(&vf->pend_cmd_count, 2);
+
+	return !ret;
+}
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 7f86050df3..5c62443999 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -23,8 +23,8 @@
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
-#define MAX_TRY_TIMES 200
-#define ASQ_DELAY_MS  10
+#define MAX_TRY_TIMES 2000
+#define ASQ_DELAY_MS  1
 
 static uint32_t
 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
@@ -143,7 +143,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 }
 
 static int
-iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args,
+	int async)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
@@ -155,8 +156,14 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (vf->vf_reset)
 		return -EIO;
 
-	if (_atomic_set_cmd(vf, args->ops))
-		return -1;
+
+	if (async) {
+		if (_atomic_set_async_response_cmd(vf, args->ops))
+			return -1;
+	} else {
+		if (_atomic_set_cmd(vf, args->ops))
+			return -1;
+	}
 
 	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
@@ -252,9 +259,11 @@ static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
@@ -330,18 +339,40 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
 		case iavf_aqc_opc_send_msg_to_vf:
 			if (msg_opc == VIRTCHNL_OP_EVENT) {
 				iavf_handle_pf_event_msg(dev, info.msg_buf,
-							info.msg_len);
+						info.msg_len);
 			} else {
+				/* check for inline IPsec events */
+				struct inline_ipsec_msg *imsg =
+					(struct inline_ipsec_msg *)info.msg_buf;
+				struct rte_eth_event_ipsec_desc desc;
+				if (msg_opc == VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
+					&& imsg->ipsec_opcode ==
+						INLINE_IPSEC_OP_EVENT) {
+					struct virtchnl_ipsec_event *ev =
+							imsg->ipsec_data.event;
+					desc.subtype =
+						RTE_ETH_EVENT_IPSEC_UNKNOWN;
+					desc.metadata = ev->ipsec_event_data;
+					rte_eth_dev_callback_process(dev,
+							RTE_ETH_EVENT_IPSEC,
+							&desc);
+					return;
+				}
+
 				/* read message and it's expected one */
-				if (msg_opc == vf->pend_cmd)
-					_notify_cmd(vf, msg_ret);
-				else
-					PMD_DRV_LOG(ERR, "command mismatch,"
-						    "expect %u, get %u",
-						    vf->pend_cmd, msg_opc);
+				if (msg_opc == vf->pend_cmd) {
+					rte_atomic32_dec(&vf->pend_cmd_count);
+					if (rte_atomic32_read(
+						&vf->pend_cmd_count) == 0)
+						_notify_cmd(vf, msg_ret);
+				} else {
+					PMD_DRV_LOG(ERR,
+					"command mismatch, expect %u, get %u",
+						vf->pend_cmd, msg_opc);
+				}
 				PMD_DRV_LOG(DEBUG,
-					    "adminq response is received,"
-					    " opcode = %d", msg_opc);
+				"adminq response is received, opcode = %d",
+						msg_opc);
 			}
 			break;
 		default:
@@ -365,7 +396,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_ENABLE_VLAN_STRIPPING");
@@ -386,7 +417,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_DISABLE_VLAN_STRIPPING");
@@ -415,7 +446,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
 		return err;
@@ -468,12 +499,13 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
-		VIRTCHNL_VF_OFFLOAD_QOS;
+		VIRTCHNL_VF_OFFLOAD_QOS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -518,7 +550,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
@@ -562,7 +594,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_strip);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
@@ -602,7 +634,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_insert);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
@@ -645,7 +677,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(vlan_filter);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");
@@ -666,7 +698,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
@@ -697,7 +729,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES");
@@ -725,7 +757,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES");
@@ -758,7 +790,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
@@ -800,7 +832,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
@@ -844,7 +876,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
@@ -890,7 +922,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
@@ -922,7 +954,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
@@ -954,7 +986,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
@@ -1046,7 +1078,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
@@ -1087,7 +1119,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
 
@@ -1128,7 +1160,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
 
@@ -1188,7 +1220,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		args.in_args_size = len;
 		args.out_buffer = vf->aq_resp;
 		args.out_size = IAVF_AQ_BUF_SZ;
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		if (err)
 			PMD_DRV_LOG(ERR, "fail to execute command %s",
 				    add ? "OP_ADD_ETHER_ADDRESS" :
@@ -1215,7 +1247,7 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		*pstats = NULL;
@@ -1250,7 +1282,7 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1290,7 +1322,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
@@ -1317,7 +1349,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
@@ -1344,7 +1376,7 @@ iavf_fdir_add(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
 		return err;
@@ -1404,7 +1436,7 @@ iavf_fdir_del(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
 		return err;
@@ -1451,7 +1483,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
 		return err;
@@ -1492,7 +1524,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of %s",
@@ -1515,7 +1547,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
@@ -1541,7 +1573,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_SET_RSS_HENA");
@@ -1562,7 +1594,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1595,7 +1627,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_TC_MAP");
@@ -1640,7 +1672,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 		i * sizeof(struct virtchnl_ether_addr);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
@@ -1685,7 +1717,7 @@ iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
 	 * before iavf_read_msg_from_pf.
 	 */
 	rte_intr_disable(&pci_dev->intr_handle);
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	rte_intr_enable(&pci_dev->intr_handle);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
@@ -1721,7 +1753,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
 		return err;
@@ -1734,3 +1766,4 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 
 	return 0;
 }
+
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v3 4/6] net/iavf: add iAVF IPsec inline crypto support
  2021-09-20 13:51 ` [dpdk-dev] [PATCH v3 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (2 preceding siblings ...)
  2021-09-20 13:51   ` [dpdk-dev] [PATCH v3 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
@ 2021-09-20 13:52   ` Radu Nicolau
  2021-09-20 13:52   ` [dpdk-dev] [PATCH v3 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
  2021-09-20 13:52   ` [dpdk-dev] [PATCH v3 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-20 13:52 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Ray Kinsella
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload for
ESP over UDP, and in conjunction with TSO for UDP and TCP flows.
Implement support for rte_security packet metadata.

Add definitions for the IPsec descriptors, and extend the data and
context descriptors to support the offload.

Add support to the virtual channel mailbox for IPsec Crypto request
operations. IPsec Crypto requests receive an initial acknowledgement
of receipt from the physical function driver and then an asynchronous
response with the success/failure of the request, including any
response data.

Add enhanced descriptor debugging

Refactor the scalar Tx burst function to support integration of the offload.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h                       |   10 +
 drivers/net/iavf/iavf_ethdev.c                |   41 +-
 drivers/net/iavf/iavf_generic_flow.c          |   16 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1918 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |   96 +
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  203 +-
 drivers/net/iavf/iavf_rxtx.h                  |   94 +-
 drivers/net/iavf/iavf_vchnl.c                 |   29 +
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 13 files changed, 2777 insertions(+), 22 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
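
To illustrate how an application consumes this offload, here is a hedged
usage sketch, not part of the patch: it binds inbound ESP traffic carrying
a given SPI to an inline crypto rte_security session through the
RTE_FLOW_ACTION_TYPE_SECURITY action that the new IPSEC_CRYPTO flow engine
parses. The function name is illustrative, "sess" is assumed to have been
created beforehand with rte_security_session_create() on this port's
security context, and error handling is omitted.

#include <rte_byteorder.h>
#include <rte_flow.h>
#include <rte_security.h>

/* Illustrative sketch: steer inbound ESP packets with a given SPI to an
 * already created RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO session.
 */
static struct rte_flow *
bind_esp_flow_sketch(uint16_t port_id, uint32_t spi,
		     struct rte_security_session *sess,
		     struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_esp esp_spec = {
		.hdr = { .spi = rte_cpu_to_be_32(spi) },
	};
	struct rte_flow_item_esp esp_mask = {
		.hdr = { .spi = RTE_BE32(0xffffffff) },
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_ESP,
		  .spec = &esp_spec, .mask = &esp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = sess },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

On the Tx side the same session is attached to outbound mbufs with
rte_security_set_pkt_metadata() and the PKT_TX_SEC_OFFLOAD flag, which is
what the pkt_metadata_template added below is used to populate.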

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 8c7f7c0bed..934ef48278 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -217,6 +217,7 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+	struct iavf_parser_list ipsec_crypto_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
@@ -239,6 +240,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_IPV6_FLOW,
 	IAVF_PROTO_XTR_TCP,
 	IAVF_PROTO_XTR_IP_OFFSET,
+	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
 	IAVF_PROTO_XTR_MAX,
 };
 
@@ -250,11 +252,14 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 };
 
+struct iavf_security_ctx;
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
 	struct rte_eth_dev *eth_dev;
 	struct iavf_info vf;
+	struct iavf_security_ctx *security_ctx;
 
 	bool rx_bulk_alloc_allowed;
 	/* For vector PMD */
@@ -273,6 +278,8 @@ struct iavf_adapter {
 	(&((struct iavf_adapter *)adapter)->vf)
 #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct iavf_adapter *)adapter)->hw)
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+	(((struct iavf_adapter *)adapter)->security_ctx)
 
 /* IAVF_VSI_TO */
 #define IAVF_VSI_TO_HW(vsi) \
@@ -415,5 +422,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			uint16_t size);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index c131461517..294be1a022 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -29,6 +29,7 @@
 #include "iavf_rxtx.h"
 #include "iavf_generic_flow.h"
 #include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
@@ -70,6 +71,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
 	[IAVF_PROTO_XTR_IP_OFFSET] = {
 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
 		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+		.param = {
+		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+		.ol_flag =
+			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
 };
 
 static int iavf_dev_configure(struct rte_eth_dev *dev);
@@ -922,6 +928,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	/* free iAVF security device context all related resources */
+	iavf_security_ctx_destroy(adapter);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -931,7 +940,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
@@ -974,6 +985,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
 
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+	}
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -1730,6 +1746,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
 		{ "tcp",       IAVF_PROTO_XTR_TCP       },
 		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
 	};
 	uint32_t i;
 
@@ -1738,8 +1755,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 			return xtr_type_map[i].type;
 	}
 
-	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
-		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+			"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
 
 	return -1;
 }
@@ -2357,6 +2374,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		goto flow_init_err;
 	}
 
+	/** Check if the IPsec Crypto offload is supported and create
+	 *  security_ctx if it is.
+	 */
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		/* Initialize security_ctx only for primary process*/
+		ret = iavf_security_ctx_create(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+			return ret;
+		}
+
+		ret = iavf_security_init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
+			return ret;
+		}
+	}
+
 	iavf_default_rss_disable(adapter);
 
 	return 0;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index 1fe270fb22..d85e82a950 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1635,6 +1635,7 @@ iavf_flow_init(struct iavf_adapter *ad)
 	TAILQ_INIT(&vf->flow_list);
 	TAILQ_INIT(&vf->rss_parser_list);
 	TAILQ_INIT(&vf->dist_parser_list);
+	TAILQ_INIT(&vf->ipsec_crypto_parser_list);
 	rte_spinlock_init(&vf->flow_ops_lock);
 
 	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
@@ -1709,6 +1710,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
+		list = &vf->ipsec_crypto_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
@@ -2018,6 +2022,14 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 
 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
 				    actions, error);
+	if (*engine)
+		return 0;
+
+	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
+			pattern, actions, error);
+	if (*engine)
+		return 0;
+
 
 	if (!*engine) {
 		rte_flow_error_set(error, EINVAL,
@@ -2064,6 +2076,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	/* Special case for inline crypto egress flows */
+	if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY)
+		goto free_flow;
+
 	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
 			&engine, iavf_parse_engine_create, error);
 	if (ret < 0) {
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 4794d1fb80..a471c0331f 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -449,6 +449,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 /* engine types. */
 enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_NONE = 0,
+	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
 	IAVF_FLOW_ENGINE_MAX,
@@ -462,6 +463,7 @@ enum iavf_flow_engine_type {
  */
 enum iavf_flow_classification_stage {
 	IAVF_FLOW_STAGE_NONE = 0,
+	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
 	IAVF_FLOW_STAGE_RSS,
 	IAVF_FLOW_STAGE_DISTRIBUTOR,
 	IAVF_FLOW_STAGE_MAX,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
new file mode 100644
index 0000000000..604766b640
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -0,0 +1,1918 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+#include <rte_security_driver.h>
+#include <rte_security.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_log.h"
+#include "iavf_generic_flow.h"
+
+#include "iavf_ipsec_crypto.h"
+#include "iavf_ipsec_crypto_capabilities.h"
+
+/**
+ * iAVF IPsec Crypto Security Context
+ */
+struct iavf_security_ctx {
+	struct iavf_adapter *adapter;
+	int pkt_md_offset;
+	struct rte_cryptodev_capabilities *crypto_capabilities;
+};
+
+/**
+ * iAVF IPsec Crypto Security Session Parameters
+ */
+struct iavf_security_session {
+	struct iavf_adapter *adapter;
+
+	enum rte_security_ipsec_sa_mode mode;
+	enum rte_security_ipsec_tunnel_type type;
+	enum rte_security_ipsec_sa_direction direction;
+
+	struct {
+		uint32_t spi; /* Security Parameter Index */
+		uint32_t hw_idx; /* SA Index in hardware table */
+	} sa;
+
+	struct {
+		uint8_t enabled :1;
+		union {
+			uint64_t value;
+			struct {
+				uint32_t hi;
+				uint32_t low;
+			};
+		};
+	} esn;
+
+	struct {
+		uint8_t enabled :1;
+		uint16_t mss;
+	} tso;
+
+	struct {
+		uint8_t enabled :1;
+	} udp_encap;
+
+	size_t iv_sz;
+	size_t icv_sz;
+	size_t block_sz;
+
+	struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
+};
+/**
+ *  IV Length field in IPsec Tx Desc uses the following encoding:
+ *
+ *  0B - 0
+ *  4B - 1
+ *  8B - 2
+ *  16B - 3
+ *
+ * but we also need the IV length for TSO to correctly calculate the total
+ * header length, so it is also placed in the upper 6 bits for easy retrieval.
+ */
+static inline uint8_t
+calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
+{
+	uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
+
+	switch (iv_sz) {
+	case 4:
+		iv_length = IAVF_IPSEC_IV_LEN_DW;
+		break;
+	case 8:
+		iv_length = IAVF_IPSEC_IV_LEN_DDW;
+		break;
+	case 16:
+		iv_length = IAVF_IPSEC_IV_LEN_QDW;
+		break;
+	}
+
+	return (iv_sz << 2) | iv_length;
+}
+
+
+static unsigned int
+iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
+{
+	return sizeof(struct iavf_security_session);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_capability(struct iavf_security_ctx *iavf_sctx,
+	uint32_t algo, uint32_t type)
+{
+	const struct rte_cryptodev_capabilities *capability;
+	int i = 0;
+
+	capability = &iavf_sctx->crypto_capabilities[i];
+
+	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+			capability->sym.xform_type == type &&
+			capability->sym.cipher.algo == algo)
+			return &capability->sym;
+		/** try next capability */
+		capability = &iavf_crypto_capabilities[i++];
+	}
+
+	return NULL;
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_auth_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
+}
+static const struct rte_cryptodev_symmetric_capability *
+get_aead_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
+}
+
+static uint16_t
+get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->auth.block_size;
+}
+
+static uint8_t
+calc_context_desc_cipherblock_sz(size_t len)
+{
+	switch (len) {
+	case 8:
+		return 0x2;
+	case 16:
+		return 0x3;
+	default:
+		return 0x0;
+	}
+}
+
+static int
+valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
+{
+	if (len < min || len > max)
+		return false;
+
+	if (increment == 0)
+		return true;
+
+	if ((len - min) % increment)
+		return false;
+
+	/* make sure it fits in the key array */
+	if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
+		return false;
+
+	return true;
+}
+
+static int
+valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_auth_xform *auth)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, auth->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(auth->key.length,
+		capability->auth.key_size.min,
+		capability->auth.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_cipher_xform *cipher)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, cipher->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(cipher->key.length,
+		capability->cipher.key_size.min,
+		capability->cipher.key_size.max,
+		capability->cipher.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_aead_xform *aead)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, aead->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(aead->key.length,
+		capability->aead.key_size.min,
+		capability->aead.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
+	struct rte_security_session_conf *conf)
+{
+	/** validate security action/protocol selection */
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
+		PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate IPsec protocol selection */
+	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate selected options */
+	if (conf->ipsec.options.copy_dscp ||
+		conf->ipsec.options.copy_flabel ||
+		conf->ipsec.options.copy_df ||
+		conf->ipsec.options.dec_ttl ||
+		conf->ipsec.options.ecn ||
+		conf->ipsec.options.stats) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+		return -EINVAL;
+	}
+
+	/**
+	 * Validate crypto xforms parameters.
+	 *
+	 * AEAD transforms can be used for either inbound/outbound IPsec SAs,
+	 * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
+	 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
+	 */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_auth_xform(iavf_sctx,
+				&conf->crypto_xform->next->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->next->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_aead_xform *aead, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AEAD;
+
+	switch (aead->algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		cfg->algo_type = VIRTCHNL_AES_CCM; break;
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		cfg->algo_type = VIRTCHNL_AES_GCM; break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
+		break;
+	}
+
+	cfg->key_len = aead->key.length;
+	cfg->iv_len = aead->iv.length;
+	cfg->digest_len = aead->digest_length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, aead->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_cipher_xform *cipher, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_CIPHER;
+
+	switch (cipher->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cfg->algo_type = VIRTCHNL_AES_CBC; break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cfg->algo_type = VIRTCHNL_3DES_CBC; break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cfg->algo_type = VIRTCHNL_AES_CTR;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid cipher parameters");
+		break;
+	}
+
+	cfg->key_len = cipher->key.length;
+	cfg->iv_len = cipher->iv.length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
+}
+
+
+static void
+sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_auth_xform *auth, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AUTH;
+
+	switch (auth->algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+		cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		cfg->algo_type = VIRTCHNL_AES_CMAC; break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		cfg->algo_type = VIRTCHNL_AES_GMAC;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid auth parameters");
+		break;
+	}
+
+	cfg->key_len = auth->key.length;
+	cfg->iv_len = auth->iv.length;
+	cfg->digest_len = auth->digest_length;
+
+	memcpy(cfg->key_data, auth->key.data, cfg->key_len);
+}
+
+/**
+ * Send SA add virtual channel request to Inline IPsec driver.
+ *
+ * The inline IPsec driver expects the SPI and destination IP address to be
+ * in host order, but the DPDK API uses network order, therefore a htonl
+ * conversion of these parameters is needed.
+ */
+static uint32_t
+iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
+	struct rte_security_session_conf *conf)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	struct virtchnl_ipsec_sa_cfg *sa_cfg;
+	size_t request_len, response_len;
+
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg);
+
+	request = rte_malloc("iavf-sad-add-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
+	response = rte_malloc("iavf-sad-add-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set SA configuration params */
+	sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
+
+	sa_cfg->spi = htonl(conf->ipsec.spi);
+	sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
+	sa_cfg->virtchnl_direction =
+		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+			VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
+
+	if (conf->ipsec.options.esn) {
+		sa_cfg->esn_enabled = 1;
+		sa_cfg->esn_hi = conf->ipsec.esn.hi;
+		sa_cfg->esn_low = conf->ipsec.esn.low;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sa_cfg->udp_encap_enabled = 1;
+
+	/* Set outer IP params */
+	if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
+
+		*((uint32_t *)sa_cfg->dst_addr)	=
+			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
+	} else {
+		uint32_t *v6_dst_addr =
+			conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
+
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
+
+		((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
+		((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
+		((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
+		((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
+	}
+
+	/* set crypto params */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->aead, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->cipher, conf->ipsec.salt);
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->auth, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->auth, conf->ipsec.salt);
+		if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
+			sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->cipher, conf->ipsec.salt);
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sa_cfg_resp->sa_handle;
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static void
+set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
+	struct iavf_security_session *sess)
+{
+	template->sa_idx = sess->sa.hw_idx;
+
+	if (sess->udp_encap.enabled)
+		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
+
+	if (sess->esn.enabled)
+		template->ol_flags |= IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
+
+	template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
+	template->ctx_desc_ipsec_params =
+			calc_context_desc_cipherblock_sz(sess->block_sz) |
+			((uint8_t)(sess->icv_sz >> 2) << 3);
+}
+
+static void
+set_session_parameter(struct iavf_security_ctx *iavf_sctx,
+	struct iavf_security_session *sess,
+	struct rte_security_session_conf *conf, uint32_t sa_idx)
+{
+	sess->adapter = iavf_sctx->adapter;
+
+	sess->mode = conf->ipsec.mode;
+	sess->direction = conf->ipsec.direction;
+
+	if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		sess->type = conf->ipsec.tunnel.type;
+
+	sess->sa.spi = conf->ipsec.spi;
+	sess->sa.hw_idx = sa_idx;
+
+	if (conf->ipsec.options.esn) {
+		sess->esn.enabled = 1;
+		sess->esn.value = conf->ipsec.esn.value;
+	}
+
+	if (conf->ipsec.options.tso) {
+		sess->tso.enabled = 1;
+		sess->tso.mss = conf->ipsec.mss;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sess->udp_encap.enabled = 1;
+
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sess->block_sz = get_aead_blocksize(iavf_sctx,
+			conf->crypto_xform->aead.algo);
+		sess->iv_sz = conf->crypto_xform->aead.iv.length;
+		sess->icv_sz = conf->crypto_xform->aead.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sess->block_sz = get_cipher_blocksize(iavf_sctx,
+			conf->crypto_xform->cipher.algo);
+		sess->iv_sz = conf->crypto_xform->cipher.iv.length;
+		sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			sess->block_sz = get_auth_blocksize(iavf_sctx,
+				RTE_CRYPTO_SYM_XFORM_AUTH);
+			sess->iv_sz = conf->crypto_xform->auth.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		} else {
+			sess->block_sz = get_cipher_blocksize(iavf_sctx,
+				conf->crypto_xform->next->cipher.algo);
+			sess->iv_sz =
+				conf->crypto_xform->next->cipher.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		}
+	}
+
+	set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
+}
+
+/**
+ * Create IPsec Security Association for inline IPsec Crypto offload.
+ *
+ * 1. validate session configuration parameters
+ * 2. allocate session memory from mempool
+ * 3. add SA to hardware database
+ * 4. set session parameters
+ * 5. create packet metadata template for datapath
+ */
+static int
+iavf_ipsec_crypto_session_create(void *device,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *session,
+				 struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_session = NULL;
+	int sa_idx;
+	int ret = 0;
+
+	/* validate that all SA parameters are valid for device */
+	ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
+	if (ret)
+		return ret;
+
+	/* allocate session context */
+	if (rte_mempool_get(mempool, (void **)&iavf_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
+		return -ENOMEM;
+	}
+
+	/* add SA to hardware database */
+	sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
+	if (sa_idx < 0) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add SA (spi: %d, mode: %s, direction: %s)",
+			conf->ipsec.spi,
+			conf->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
+				"transport" : "tunnel",
+			conf->ipsec.direction ==
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+				"inbound" : "outbound");
+
+		rte_mempool_put(mempool, iavf_session);
+		return -EFAULT;
+	}
+
+	/* save data plane required session parameters */
+	set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
+
+	/* save to security session private data */
+	set_sec_session_private_data(session, iavf_session);
+
+	return 0;
+}
+
+/**
+ * Check if valid ipsec crypto action.
+ * SPI must be non-zero and SPI in session must match SPI value
+ * passed into function.
+ *
+ * returns: 0 if the session is invalid or the SPI value equals zero
+ * returns: 1 if valid
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_session *sess = session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(sess == NULL || sess->adapter != adapter))
+		return false;
+
+	/* SPI value must be non-zero */
+	if (spi == 0)
+		return false;
+	/* Session SPI must match the flow SPI */
+	else if (sess->sa.spi == spi) {
+		return true;
+		/**
+		 * TODO: We should add a way of tracking valid hw SA indices to
+		 * make validation less brittle
+		 */
+	}
+
+	return true;
+}
+
+
+/**
+ * Send virtual channel security policy add request to IES driver.
+ *
+ * IES driver expects the SPI and destination IP address to be in host
+ * order, but the DPDK APIs use network order, therefore we need to do an
+ * htonl conversion of these parameters.
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg);
+	request = rte_malloc("iavf-inbound-security-policy-add-request",
+				request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* ESP SPI */
+	request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
+
+	/* Destination IP  */
+	if (is_v4) {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
+		request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
+	} else {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+		request->ipsec_data.sp_cfg->dip[0] =
+				htonl(((uint32_t *)v6_dst_addr)[0]);
+		request->ipsec_data.sp_cfg->dip[1] =
+				htonl(((uint32_t *)v6_dst_addr)[1]);
+		request->ipsec_data.sp_cfg->dip[2] =
+				htonl(((uint32_t *)v6_dst_addr)[2]);
+		request->ipsec_data.sp_cfg->dip[3] =
+				htonl(((uint32_t *)v6_dst_addr)[3]);
+	}
+
+	request->ipsec_data.sp_cfg->drop = drop;
+
+	/* Traffic Class/Congestion Domain currently not supported */
+	request->ipsec_data.sp_cfg->set_tc = 0;
+	request->ipsec_data.sp_cfg->cgd = 0;
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
+	response = rte_malloc("iavf-inbound-security-policy-add-response",
+				response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* send virtual channel request to add security policy to hardware */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sp_cfg_resp->rule_id;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_update);
+	request = rte_malloc("iavf-sa-update-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sa-update-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set request params */
+	request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
+	request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
+
+	/* send virtual channel request to update SA in hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_resp->resp;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_update(void *device,
+		struct rte_security_session *session,
+		struct rte_security_session_conf *conf)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int rc = 0;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* update esn hi 32-bits */
+	if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
+		/**
+		 * Update ESN in hardware for inbound SA. Store in
+		 * iavf_security_session for outbound SA for use
+		 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
+		 */
+		if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+			rc = iavf_ipsec_crypto_sa_update_esn(adapter,
+					iavf_sess);
+		else
+			iavf_sess->esn.hi = conf->ipsec.esn.hi;
+	}
+
+	/* update TSO MSS size */
+	if (iavf_sess->tso.enabled && conf->ipsec.options.tso)
+		iavf_sess->tso.mss = conf->ipsec.mss;
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
+		struct rte_security_session *session __rte_unused,
+		struct rte_security_stats *stats __rte_unused)
+{
+	return -EOPNOTSUPP;
+}
+
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_destroy);
+	request = rte_malloc("iavf-sp-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sp-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set security policy params */
+	request->ipsec_data.sp_destroy->table_id = is_v4 ?
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+	request->ipsec_data.sp_destroy->rule_id = flow_id;
+
+	/* send virtual channel request to delete security policy from hardware */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_status->status;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_destroy);
+
+	request = rte_malloc("iavf-sa-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+
+	response = rte_malloc("iavf-sa-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/**
+	 * SA delete supports deletion of 1-8 specified SAs, or, if the flag
+	 * field is zero, all SAs associated with the VF will be deleted.
+	 */
+	if (sess) {
+		request->ipsec_data.sa_destroy->flag = 0x1;
+		request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
+	} else {
+		request->ipsec_data.sa_destroy->flag = 0x0;
+	}
+
+	/* send virtual channel request to delete SA from hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+
+	/**
+	 * The delete status will be the same bitmask as the sa_destroy request
+	 * flag if the delete was successful
+	 */
+	if (request->ipsec_data.sa_destroy->flag !=
+			response->ipsec_data.ipsec_status->status)
+		rc = -EFAULT;
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+
+static int
+iavf_ipsec_crypto_session_destroy(void *device,
+		struct rte_security_session *session)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int ret;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
+	rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
+	return ret;
+}
+
+/**
+ * Get the ESP trailer from the packet as well as calculate the total ESP
+ * trailer length, which includes the padding, the ESP trailer footer and
+ * the ICV
+ */
+static inline struct rte_esp_tail *
+iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t *esp_trailer_length)
+{
+	struct rte_esp_tail *esp_trailer;
+
+	uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
+	uint16_t offset = 0;
+
+	/**
+	 * The ICV will not be present in TSO packets as this is appended by
+	 * hardware during segment generation
+	 */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		length -=  s->icv_sz;
+
+	*esp_trailer_length = length;
+
+	/**
+	 * Calculate offset in packet to ESP trailer header, this should be
+	 * total packet length less the size of the ESP trailer plus the ICV
+	 * length if it is present
+	 */
+	offset = rte_pktmbuf_pkt_len(m) - length;
+
+	if (m->nb_segs > 1) {
+		/* find segment which esp trailer is located */
+		while (m->data_len < offset) {
+			offset -= m->data_len;
+			m = m->next;
+		}
+	}
+
+	esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
+
+	*esp_trailer_length += esp_trailer->pad_len;
+
+	return esp_trailer;
+}
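+
+/**
+ * Worked example (illustrative numbers only): for a non-TSO packet using an
+ * AEAD algorithm with a 16 B ICV and 2 B of padding, the initial length is
+ * sizeof(struct rte_esp_tail) + 16 = 18 B; once the trailer is read, the
+ * reported *esp_trailer_length* becomes 18 + pad_len = 20 B. For TSO packets
+ * the ICV is excluded, so the same case reports 2 + 2 = 4 B.
+ */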
+
+
+static inline uint16_t
+iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t esp_tlen)
+{
+	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
+	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
+	uint16_t ol4_len = 0;		/* UDP NATT */
+	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
+	uint16_t l4_len = 0;		/* TCP/UDP/STCP hdrs */
+	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
+
+	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ol3_len = m->outer_l3_len;
+		/**<
+		 * application provided l3len assumed to include length of
+		 * ipv4/6 hdr + ext hdrs
+		 */
+
+	if (s->udp_encap.enabled)
+		ol4_len = sizeof(struct rte_udp_hdr);
+
+	l3_len = m->l3_len;
+	l4_len = m->l4_len;
+
+	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
+			esp_hlen + l3_len + l4_len + esp_tlen);
+}
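+
+/**
+ * Worked example (illustrative numbers only): transport mode, no NAT-T,
+ * l2_len = 14, l3_len = 20, l4_len = 20, IV = 8 B (esp_hlen = 8 + 8 = 16)
+ * and esp_tlen = 20. For a 1090 B packet the L4 payload length is
+ * 1090 - (14 + 0 + 0 + 16 + 20 + 20 + 20) = 1000 B.
+ */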
+
+
+static int
+iavf_ipsec_crypto_pkt_metadata_set(void *device,
+			 struct rte_security_session *session,
+			 struct rte_mbuf *m, void *params)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_sess = session->sess_private_data;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
+	struct rte_esp_tail *esp_tail;
+	uint64_t *sqn = params;
+	uint16_t esp_trailer_length;
+
+	/* Check we have a valid session and that it belongs to this device */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* Get dynamic metadata location from mbuf */
+	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
+		struct iavf_ipsec_crypto_pkt_metadata *);
+
+	/* Set immutable metadata values from the session template */
+	memcpy(md, &iavf_sess->pkt_metadata_template,
+		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
+
+	esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
+			&esp_trailer_length);
+
+	/* Set per packet mutable metadata values */
+	md->esp_trailer_len = esp_trailer_length;
+	md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
+				iavf_sess, esp_trailer_length);
+	md->next_proto = esp_tail->next_proto;
+
+	/* If Extended SN in use set the upper 32-bits in metadata */
+	if (iavf_sess->esn.enabled && sqn != NULL)
+		md->esn = (uint32_t)(*sqn >> 32);
+
+	return 0;
+}
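+
+/**
+ * Illustrative datapath usage (assumption, not part of this patch): with ESN
+ * enabled the application passes the full 64-bit sequence number so that the
+ * upper 32 bits can be carried in the IPsec descriptor, e.g.
+ *
+ *   uint64_t sqn = sa_sqn;
+ *   rte_security_set_pkt_metadata(ctx, session, mbuf, &sqn);
+ *   rte_eth_tx_burst(port_id, queue_id, &mbuf, 1);
+ *
+ * where *ctx* is the device security context and *sa_sqn* is maintained by
+ * the application's IPsec stack.
+ */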
+
+static int
+iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
+		struct virtchnl_ipsec_cap *capability)
+{
+	/* Perform pf-vf comms */
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg);
+
+	request = rte_malloc("iavf-device-capability-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_cap);
+	response = rte_malloc("iavf-device-capability-response",
+			response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* send virtual channel request to get device capabilities */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id) {
+		rc = -EFAULT;
+		goto update_cleanup;
+	}
+	memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+
+enum rte_crypto_auth_algorithm auth_maptbl[] = {
+	/* Hash Algorithm */
+	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
+	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
+	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
+	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
+	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
+	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
+	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
+	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
+	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
+	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+};
+
+static void
+update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
+		struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	capability->auth.algo = auth_maptbl[acap->algo_type];
+	capability->auth.block_size = acap->block_size;
+
+	capability->auth.key_size.min = acap->min_key_size;
+	capability->auth.key_size.max = acap->max_key_size;
+	capability->auth.key_size.increment = acap->inc_key_size;
+
+	capability->auth.digest_size.min = acap->min_digest_size;
+	capability->auth.digest_size.max = acap->max_digest_size;
+	capability->auth.digest_size.increment = acap->inc_digest_size;
+}
+
+enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
+	/* Cipher Algorithm */
+	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
+	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
+	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
+	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
+};
+
+
+static void
+update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+	capability->cipher.algo = cipher_maptbl[acap->algo_type];
+
+	capability->cipher.block_size = acap->block_size;
+
+	capability->cipher.key_size.min = acap->min_key_size;
+	capability->cipher.key_size.max = acap->max_key_size;
+	capability->cipher.key_size.increment = acap->inc_key_size;
+
+	capability->cipher.iv_size.min = acap->min_iv_size;
+	capability->cipher.iv_size.max = acap->max_iv_size;
+	capability->cipher.iv_size.increment = acap->inc_iv_size;
+}
+
+enum rte_crypto_aead_algorithm aead_maptbl[] = {
+	/* AEAD Algorithm */
+	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
+	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
+	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+};
+
+static void
+update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	capability->aead.algo = aead_maptbl[acap->algo_type];
+
+	capability->aead.block_size = acap->block_size;
+
+	capability->aead.key_size.min = acap->min_key_size;
+	capability->aead.key_size.max = acap->max_key_size;
+	capability->aead.key_size.increment = acap->inc_key_size;
+
+	capability->aead.aad_size.min = acap->min_aad_size;
+	capability->aead.aad_size.max = acap->max_aad_size;
+	capability->aead.aad_size.increment = acap->inc_aad_size;
+
+	capability->aead.iv_size.min = acap->min_iv_size;
+	capability->aead.iv_size.max = acap->max_iv_size;
+	capability->aead.iv_size.increment = acap->inc_iv_size;
+
+	capability->aead.digest_size.min = acap->min_digest_size;
+	capability->aead.digest_size.max = acap->max_digest_size;
+	capability->aead.digest_size.increment = acap->inc_digest_size;
+}
+
+
+/**
+ * Dynamically set crypto capabilities based on the virtchnl IPsec
+ * capabilities structure.
+ */
+int
+iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
+{
+	struct rte_cryptodev_capabilities *capabilities;
+	int i, j, number_of_capabilities = 0, ci = 0;
+
+	/* Count the total number of crypto algorithms supported */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
+		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
+
+	/**
+	 * Allocate cryptodev capabilities structure for
+	 * *number_of_capabilities* items plus one item to null terminate the
+	 * array
+	 */
+	capabilities = rte_zmalloc("crypto_cap",
+		sizeof(struct rte_cryptodev_capabilities) *
+		(number_of_capabilities + 1), 0);
+	if (capabilities == NULL)
+		return -ENOMEM;
+
+	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
+
+	/**
+	 * Iterate over each virtchnl crypto capability by crypto type and
+	 * algorithm.
+	 */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
+		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
+			switch (vch_cap->cap[i].crypto_type) {
+			case VIRTCHNL_AUTH:
+				update_auth_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_CIPHER:
+				update_cipher_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_AEAD:
+				update_aead_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			default:
+				capabilities[ci].op =
+						RTE_CRYPTO_OP_TYPE_UNDEFINED;
+				break;
+			}
+		}
+	}
+
+	iavf_sctx->crypto_capabilities = capabilities;
+	return 0;
+}
+
+/**
+ * Get security capabilities for device
+ */
+static const struct rte_security_capability *
+iavf_ipsec_crypto_capabilities_get(void *device)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	unsigned int i;
+
+	static struct rte_security_capability iavf_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .tso = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .tso = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .tso = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .tso = 1,
+						.stats = 1, .esn = 1 }
+			},
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	/**
+	 * Update the security capabilities struct with the runtime discovered
+	 * crypto capabilities, except for the last element of the array, which
+	 * is the NULL termination
+	 */
+	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
+			sizeof(iavf_security_capabilities[0])) - 1); i++) {
+		iavf_security_capabilities[i].crypto_capabilities =
+			iavf_sctx->crypto_capabilities;
+	}
+
+	return iavf_security_capabilities;
+}
+
+static struct rte_security_ops iavf_ipsec_crypto_ops = {
+	.session_get_size		= iavf_ipsec_crypto_session_size_get,
+	.session_create			= iavf_ipsec_crypto_session_create,
+	.session_update			= iavf_ipsec_crypto_session_update,
+	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
+	.session_destroy		= iavf_ipsec_crypto_session_destroy,
+	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
+	.get_userdata			= NULL,
+	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
+};
+
+int
+iavf_security_ctx_create(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx;
+
+	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
+	if (sctx == NULL)
+		return -ENOMEM;
+
+	sctx->device = adapter->eth_dev;
+	sctx->ops = &iavf_ipsec_crypto_ops;
+	sctx->sess_cnt = 0;
+
+	adapter->eth_dev->security_ctx = sctx;
+
+	if (adapter->security_ctx == NULL) {
+		adapter->security_ctx = rte_malloc("iavf_security_ctx",
+				sizeof(struct iavf_security_ctx), 0);
+		if (adapter->security_ctx == NULL)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int
+iavf_security_init(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct rte_mbuf_dynfield pkt_md_dynfield = {
+		.name = "iavf_ipsec_crypto_pkt_metadata",
+		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
+		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
+	};
+	struct virtchnl_ipsec_cap capabilities;
+	int rc;
+
+	iavf_sctx->adapter = adapter;
+
+	iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
+	if (iavf_sctx->pkt_md_offset < 0)
+		return iavf_sctx->pkt_md_offset;
+
+	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
+	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
+	if (rc)
+		return rc;
+
+	return	iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
+			&capabilities);
+}
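+
+/**
+ * Note (illustrative, not part of this patch): the dynamic field registered
+ * above can also be resolved by name, e.g.
+ *
+ *   int off = rte_mbuf_dynfield_lookup("iavf_ipsec_crypto_pkt_metadata",
+ *                                      NULL);
+ *
+ * which returns the same offset stored in iavf_sctx->pkt_md_offset, or a
+ * negative value if the field has not been registered.
+ */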
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	return iavf_sctx->pkt_md_offset;
+}
+
+int
+iavf_security_ctx_destroy(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx  = adapter->eth_dev->security_ctx;
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	if (iavf_sctx == NULL)
+		return -ENODEV;
+
+	/* TODO: Add resources cleanup */
+
+	/* free and reset security data structures */
+	rte_free(iavf_sctx);
+	rte_free(sctx);
+
+	iavf_sctx = NULL;
+	sctx = NULL;
+
+	return 0;
+}
+
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
+{
+	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
+
+	/** Capability check for IPsec Crypto */
+	if (resources && (resources->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
+		return true;
+
+	return false;
+}
+
+
+#define IAVF_IPSEC_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
+enum iavf_ipsec_flow_pt_type {
+	IAVF_PATTERN_ESP = 1,
+	IAVF_PATTERN_AH,
+	IAVF_PATTERN_UDP_ESP,
+};
+enum iavf_ipsec_flow_pt_ip_ver {
+	IAVF_PATTERN_IPV4 = 1,
+	IAVF_PATTERN_IPV6,
+};
+
+#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
+#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
+#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
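+
+/**
+ * Example encoding (for reference): IAVF_PATTERN(IAVF_PATTERN_UDP_ESP,
+ * IAVF_PATTERN_IPV6) packs to (3 | (2 << 4)) = 0x23, from which
+ * IAVF_PATTERN_TYPE() recovers 3 (UDP_ESP) and IAVF_PATTERN_IP_V()
+ * recovers 2 (IPv6).
+ */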
+
+static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
+	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
+};
+
+struct iavf_ipsec_flow_item {
+	uint64_t id;
+	uint8_t is_ipv4;
+	uint32_t spi;
+	struct rte_ether_hdr eth_hdr;
+	union {
+		struct rte_ipv4_hdr ipv4_hdr;
+		struct rte_ipv6_hdr ipv6_hdr;
+	};
+	struct rte_udp_hdr udp_hdr;
+};
+
+static void
+parse_eth_item(const struct rte_flow_item_eth *item,
+		struct rte_ether_hdr *eth)
+{
+	memcpy(eth->s_addr.addr_bytes,
+			item->src.addr_bytes, sizeof(eth->s_addr));
+	memcpy(eth->d_addr.addr_bytes,
+			item->dst.addr_bytes, sizeof(eth->d_addr));
+}
+
+static void
+parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
+		struct rte_ipv4_hdr *ipv4)
+{
+	ipv4->src_addr = item->hdr.src_addr;
+	ipv4->dst_addr = item->hdr.dst_addr;
+}
+
+static void
+parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
+		struct rte_ipv6_hdr *ipv6)
+{
+	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
+	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+}
+
+static void
+parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
+{
+	udp->dst_port = item->hdr.dst_port;
+	udp->src_port = item->hdr.src_port;
+}
+
+static int
+has_security_action(const struct rte_flow_action actions[],
+	const void **session)
+{
+	/* only {SECURITY; END} supported */
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
+			actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
+		*session = actions[0].conf;
+		return true;
+	}
+	return false;
+}
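+
+/**
+ * Illustrative action list (assumption, not part of this patch): the only
+ * accepted layout is a SECURITY action immediately followed by END, e.g.
+ *
+ *   struct rte_flow_action actions[] = {
+ *       { .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = sess },
+ *       { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
+ *   };
+ *
+ * where *sess* is the rte_security session created for this SA.
+ */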
+
+
+static struct iavf_ipsec_flow_item *
+iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t type)
+{
+	const void *session;
+	struct iavf_ipsec_flow_item
+		*ipsec_flow = rte_malloc("security-flow-rule",
+		sizeof(struct iavf_ipsec_flow_item), 0);
+	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
+	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
+
+	if (ipsec_flow == NULL)
+		return NULL;
+
+	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
+
+	if (pattern[0].spec)
+		parse_eth_item((const struct rte_flow_item_eth *)
+				pattern[0].spec, &ipsec_flow->eth_hdr);
+
+	switch (p_type) {
+	case IAVF_PATTERN_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[2].spec)->hdr.spi;
+		break;
+	case IAVF_PATTERN_AH:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_ah *)
+					pattern[2].spec)->spi;
+		break;
+	case IAVF_PATTERN_UDP_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		parse_udp_item((const struct rte_flow_item_udp *)
+				pattern[2].spec,
+			&ipsec_flow->udp_hdr);
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[3].spec)->hdr.spi;
+		break;
+	default:
+		goto flow_cleanup;
+	}
+
+
+	if (!has_security_action(actions, &session))
+		goto flow_cleanup;
+
+	if (!iavf_ipsec_crypto_action_valid(ethdev, session,
+			ipsec_flow->spi))
+		goto flow_cleanup;
+
+	return ipsec_flow;
+
+flow_cleanup:
+	rte_free(ipsec_flow);
+	return NULL;
+}
+
+
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser;
+
+static int
+iavf_ipsec_flow_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
+		parser = &iavf_ipsec_flow_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
+}
+
+static int
+iavf_ipsec_flow_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = meta;
+
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	if (ipsec_flow->is_ipv4) {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			1,
+			ipsec_flow->ipv4_hdr.dst_addr,
+			NULL,
+			0);
+	} else {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			0,
+			0,
+			ipsec_flow->ipv6_hdr.dst_addr,
+			0);
+	}
+
+	if (ipsec_flow->id < 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Failed to add SA.");
+		return -rte_errno;
+	}
+
+	flow->rule = ipsec_flow;
+
+	return 0;
+}
+
+static int
+iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
+
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	iavf_ipsec_crypto_security_policy_delete(ad,
+			ipsec_flow->is_ipv4, ipsec_flow->id);
+	rte_free(ipsec_flow);
+	return 0;
+}
+
+static struct iavf_flow_engine iavf_ipsec_flow_engine = {
+	.init = iavf_ipsec_flow_init,
+	.uninit = iavf_ipsec_flow_uninit,
+	.create = iavf_ipsec_flow_create,
+	.destroy = iavf_ipsec_flow_destroy,
+	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
+};
+
+static int
+iavf_ipsec_flow_parse(struct iavf_adapter *ad,
+		       struct iavf_pattern_match_item *array,
+		       uint32_t array_len,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta,
+		       struct rte_flow_error *error)
+{
+	struct iavf_pattern_match_item *item = NULL;
+	int ret = -1;
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (item && item->meta) {
+		uint32_t type = (uint64_t)(item->meta);
+		struct iavf_ipsec_flow_item *fi =
+				iavf_ipsec_flow_item_parse(ad->eth_dev,
+						pattern, actions, type);
+		if (fi && meta) {
+			*meta = fi;
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser = {
+	.engine = &iavf_ipsec_flow_engine,
+	.array = iavf_ipsec_flow_pattern,
+	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
+	.parse_pattern_action = iavf_ipsec_flow_parse,
+	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
+};
+
+RTE_INIT(iavf_ipsec_flow_engine_register)
+{
+	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
+}
+
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
new file mode 100644
index 0000000000..d8d7d6649e
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_H_
+#define _IAVF_IPSEC_CRYPTO_H_
+
+#include <rte_security.h>
+
+#include "iavf.h"
+
+/* IPsec Crypto Packet Metadata offload flags */
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
+
+/**
+ * Packet metadata structure used to hold the parameters required by the iAVF
+ * transmit data path. The parameters are set per packet for a session by
+ * calling the rte_security_set_pkt_metadata() API.
+ */
+struct iavf_ipsec_crypto_pkt_metadata {
+	uint32_t sa_idx;                /* SA hardware index (20b/4B) */
+
+	uint8_t ol_flags;		/* flags (1B) */
+	uint8_t len_iv;			/* IV length (2b/1B) */
+	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
+	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
+
+	uint16_t l4_payload_len;	/* L4 payload length */
+	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extension headers len (5b/1B) */
+	uint8_t next_proto;		/* Next Protocol (8b/1B) */
+
+	uint32_t esn;		        /* Extended Sequence Number (32b/4B) */
+} __rte_packed;
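+
+/**
+ * For reference, the packed layout above is 4 + 1 + 1 + 1 + 1 + 2 + 1 + 1 + 4
+ * = 16 bytes, which is the size registered for the mbuf dynamic field used to
+ * carry this metadata to the transmit path.
+ */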
+
+/**
+ * Check if inline IPsec Crypto offload is supported
+ */
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
+
+/**
+ * Create security context
+ */
+int iavf_security_ctx_create(struct iavf_adapter *adapter);
+
+/**
+ * Initialize security context
+ */
+int iavf_security_init(struct iavf_adapter *adapter);
+
+/**
+ * Set security capabilities
+ */
+int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchl_capabilities);
+
+
+int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+/**
+ * Destroy security context
+ */
+int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
+
+/**
+ * Verify that the inline IPsec Crypto action is valid for this device
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi);
+
+/**
+ * Add inbound security policy rule to hardware
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop);
+
+/**
+ * Delete inbound security policy rule from hardware
+ */
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id);
+
+#endif /* _IAVF_IPSEC_CRYPTO_H_ */
diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
new file mode 100644
index 0000000000..70ce8dd638
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC MAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 },
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* ChaCha20-Poly1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 16,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	}
+};
+
+
+#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index a84a0b07f6..3f8c0822b7 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -27,6 +27,7 @@
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
+#include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
@@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 uint8_t
 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
@@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
 		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
 		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
 		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
+				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
 	};
 
 	return flex_type < RTE_DIM(rxdid_map) ?
@@ -504,6 +508,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields =
 			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
+	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+		rxq->xtr_ol_flag =
+			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+		rxq->rxd_to_pkt_fields =
+			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
 	case IAVF_RXDID_COMMS_OVS_1:
 		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
 		break;
@@ -688,6 +698,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		       const struct rte_eth_txconf *tx_conf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
@@ -732,9 +744,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		struct virtchnl_vlan_supported_caps *insertion_support =
-			&vf->vlan_v2_caps.offloads.insertion_support;
+			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
 		uint32_t insertion_cap;
 
 		if (insertion_support->outer)
@@ -758,6 +770,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
+	if (iavf_ipsec_crypto_supported(adapter))
+		txq->ipsec_crypto_pkt_md_offset =
+			iavf_security_get_pkt_md_offset(adapter);
+
 	/* Allocate software ring */
 	txq->sw_ring =
 		rte_zmalloc_socket("iavf tx sw ring",
@@ -1075,6 +1091,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
 #endif
 }
 
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
+
+	mb->dynfield1[0] = desc->ipsec_said &
+			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
+}
+
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp,
+			  struct iavf_ipsec_crypto_stats *stats)
+{
+	uint16_t status1 = rte_le_to_cpu_16(rxdp->wb.status_error1);
+
+	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
+		uint16_t ipsec_status;
+
+		mb->ol_flags |= PKT_RX_SEC_OFFLOAD;
+
+		ipsec_status = status1 &
+			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
+
+
+		if (unlikely(ipsec_status !=
+			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
+			mb->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+
+			switch (ipsec_status) {
+			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
+				stats->ierrors.sad_miss++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
+				stats->ierrors.not_processed++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
+				stats->ierrors.icv_check++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
+				stats->ierrors.ipsec_length++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
+				stats->ierrors.misc++;
+				break;
+			}
+
+			stats->ierrors.count++;
+			return;
+		}
+
+		stats->icount++;
+		stats->ibytes += rte_le_to_cpu_16(rxdp->wb.pkt_len) & 0x3FFF;
+
+		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
+			ipsec_status !=
+				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
+			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
+	}
+}
+
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
@@ -1393,6 +1473,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1535,6 +1617,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1773,6 +1857,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+				&rxq->stats.ipsec_crypto);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2085,6 +2171,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
 	*field |= cmd;
 }
 
+static inline void
+iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+{
+	uint64_t ipsec_field =
+		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+
+	*field |= ipsec_field;
+}
+
+
 static inline void
 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 		const struct rte_mbuf *m)
@@ -2117,15 +2215,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 
 static inline uint16_t
 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
-	struct rte_mbuf *m)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
 	uint64_t segmentation_field = 0;
 	uint64_t total_length = 0;
 
-	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD) {
+		total_length = ipsec_md->l4_payload_len;
+	} else {
+		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
-	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
-		total_length -= m->outer_l3_len;
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			total_length -= m->outer_l3_len;
+	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
 	if (!m->l4_len || !m->tso_segsz)
@@ -2148,7 +2250,8 @@ iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
 
 static inline void
 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
-	struct rte_mbuf *m, uint16_t *tlen)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
+	uint16_t *tlen)
 {
 	/* fill descriptor type field */
 	desc->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
@@ -2158,8 +2261,12 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 
 	/* fill segmentation field */
 	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		/* fill IPsec field */
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			iavf_fill_ctx_desc_ipsec_field(&desc->qw1, ipsec_md);
+
 		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc->qw1,
-				m);
+				m, ipsec_md);
 	}
 
 	/* fill tunnelling field */
@@ -2173,6 +2280,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 }
 
 
+static inline void
+iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
+	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
+{
+	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
+		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
+		((uint64_t)md->esp_trailer_len <<
+				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
+
+	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
+		((uint64_t)md->next_proto <<
+				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
+		((uint64_t)(md->len_iv & 0x3) <<
+				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
+		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+				1ULL : 0ULL) <<
+				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
+		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+
+	/**
+	 * TODO: Pre-calculate this in the Session initialization
+	 *
+	 * Calculate IPsec length required in data descriptor func when TSO
+	 * offload is enabled
+	 */
+	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
+			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+			sizeof(struct rte_udp_hdr) : 0);
+}
+
 static inline void
 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		struct rte_mbuf *m)
@@ -2286,6 +2425,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
 }
 
 
+static struct iavf_ipsec_crypto_pkt_metadata *
+iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
+		struct rte_mbuf *m)
+{
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
+				struct iavf_ipsec_crypto_pkt_metadata *);
+
+	return NULL;
+}
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -2314,7 +2464,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	for (idx = 0; idx < nb_pkts; idx++) {
 		volatile struct iavf_tx_desc *ddesc;
-		uint16_t nb_desc_ctx;
+		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
+
+		uint16_t nb_desc_ctx, nb_desc_ipsec;
 		uint16_t nb_desc_data, nb_desc_required;
 		uint16_t tlen = 0, ipseclen = 0;
 		uint64_t ddesc_template = 0;
@@ -2324,16 +2476,23 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+		/**
+		 * Get metadata for ipsec crypto from mbuf dynamic fields if
+		 * security offload is specified.
+		 */
+		ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);
+
 		nb_desc_data = mb->nb_segs;
 		nb_desc_ctx = !!(mb->ol_flags &
 			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
+		nb_desc_ipsec = !!(mb->ol_flags & PKT_TX_SEC_OFFLOAD);
 
 		/**
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx;
+		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2384,7 +2543,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txe->mbuf = NULL;
 			}
 
-			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
 			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
 			txe->last_id = desc_idx_last;
@@ -2392,8 +2551,28 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe = txn;
 			}
 
+		if (nb_desc_ipsec) {
+			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
+				(volatile struct iavf_tx_ipsec_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
+			if (txe->mbuf) {
+				rte_pktmbuf_free_seg(txe->mbuf);
+				txe->mbuf = NULL;
+			}
+
+			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
+		}
 
-		
 		mb_seg = mb;
 
 		do {
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 1bc47614ea..e009387aff 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -25,7 +25,8 @@
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		DEV_TX_OFFLOAD_TCP_TSO |		 \
+		DEV_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
 		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
@@ -47,7 +48,7 @@
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          88
+#define IAVF_MIN_TSO_MSS          256
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
@@ -65,7 +66,8 @@
 		PKT_TX_VLAN_PKT |		 \
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
-		PKT_TX_TCP_SEG)
+		PKT_TX_TCP_SEG |		 \
+		PKT_TX_SEC_OFFLOAD)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
@@ -163,6 +165,24 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_rx_queue_stats {
+	uint64_t reserved;
+	struct iavf_ipsec_crypto_stats ipsec_crypto;
+};
+
 /* Structure associated with each Rx queue. */
 struct iavf_rx_queue {
 	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
@@ -211,6 +231,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 				/* handle flexible descriptor by RXDID */
+	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
 };
 
@@ -245,6 +266,7 @@ struct iavf_tx_queue {
 	uint64_t offloads;
 	uint16_t next_dd;              /* next to set RS, for VPMD */
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
+	uint16_t ipsec_crypto_pkt_md_offset;
 
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
@@ -347,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
 	} flex_ts;
 };
 
+/* Rx Flex Descriptor
+ * RxDID Profile ID 24 Inline IPsec
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: Inline IPsec SAID lower 16-bits
+ * Flex-field 5: Inline IPsec SAID upper 16-bits
+ */
+struct iavf_32b_rx_flex_desc_comms_ipsec {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	__le32 ipsec_said;
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -366,6 +422,7 @@ enum iavf_rxdid {
 	IAVF_RXDID_COMMS_AUX_TCP	= 21,
 	IAVF_RXDID_COMMS_OVS_1		= 22,
 	IAVF_RXDID_COMMS_OVS_2		= 23,
+	IAVF_RXDID_COMMS_IPSEC_CRYPTO	= 24,
 	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
 	IAVF_RXDID_LAST			= 63,
 };
@@ -393,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
 
 enum iavf_rx_flex_desc_status_error_1_bits {
 	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
 	/* [10:6] reserved */
 	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
 	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
@@ -405,6 +466,24 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
+
+enum iavf_rx_flex_desc_ipsec_crypto_status {
+	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
+	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
+	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
+	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
+	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
+	/* Reserved */
+	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
+};
+
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)
+
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
@@ -565,6 +644,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	case IAVF_TX_DESC_DTYPE_CONTEXT:
 		name = "Tx_context_desc";
 		break;
+	case IAVF_TX_DESC_DTYPE_IPSEC:
+		name = "Tx_IPsec_desc";
+		break;
 	default:
 		name = "unknown_desc";
 		break;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 5c62443999..d99b03c8b2 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1767,3 +1767,32 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	return 0;
 }
 
+
+
+int
+iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
+	args.in_args = msg;
+	args.in_args_size = msg_len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 1);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+				"OP_INLINE_IPSEC_CRYPTO");
+		return err;
+	}
+
+	memcpy(resp_msg, args.out_buffer, resp_msg_len);
+
+	return 0;
+}
+
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index f2010a8337..385770b043 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -10,7 +10,7 @@ endif
 cflags += ['-Wno-strict-aliasing']
 
 includes += include_directories('../../common/iavf')
-deps += ['common_iavf']
+deps += ['common_iavf', 'security', 'cryptodev']
 
 sources = files(
         'iavf_ethdev.c',
@@ -20,6 +20,7 @@ sources = files(
         'iavf_fdir.c',
         'iavf_hash.c',
         'iavf_tm.c',
+        'iavf_ipsec_crypto.c',
 )
 
 if arch_subdir == 'x86'
diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
index 3a045040f1..7426eb9be3 100644
--- a/drivers/net/iavf/rte_pmd_iavf.h
+++ b/drivers/net/iavf/rte_pmd_iavf.h
@@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 /**
  * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map
index f3efe756cf..97f0f87311 100644
--- a/drivers/net/iavf/version.map
+++ b/drivers/net/iavf/version.map
@@ -13,4 +13,7 @@ EXPERIMENTAL {
 	rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+	# added in 21.11
+	rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v3 5/6] net/iavf: add xstats support for inline IPsec crypto
  2021-09-20 13:51 ` [dpdk-dev] [PATCH v3 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (3 preceding siblings ...)
  2021-09-20 13:52   ` [dpdk-dev] [PATCH v3 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-09-20 13:52   ` Radu Nicolau
  2021-09-20 13:52   ` [dpdk-dev] [PATCH v3 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-20 13:52 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add per queue counters for maintaining statistics for inline IPsec
crypto offload, which can be retrieved through the
rte_security_session_stats_get() with more detailed errors through the
rte_ethdev xstats.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h        | 21 ++++++++-
 drivers/net/iavf/iavf_ethdev.c | 84 ++++++++++++++++++++++++++++------
 drivers/net/iavf/iavf_rxtx.h   | 12 -----
 3 files changed, 89 insertions(+), 28 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 934ef48278..d5f574b4b3 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -92,6 +92,25 @@ struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
 
+
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_eth_xstats {
+	struct virtchnl_eth_stats eth_stats;
+	struct iavf_ipsec_crypto_stats ips_stats;
+};
+
 /* Structure that defines a VSI, associated with a adapter. */
 struct iavf_vsi {
 	struct iavf_adapter *adapter; /* Backreference to associated adapter */
@@ -101,7 +120,7 @@ struct iavf_vsi {
 	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
 	uint16_t base_vector;
 	uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
-	struct virtchnl_eth_stats eth_stats_offset;
+	struct iavf_eth_xstats eth_stats_offset;
 };
 
 struct rte_flow;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 294be1a022..aad6a28585 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -89,6 +89,7 @@ static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);
 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n);
 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -144,21 +145,37 @@ struct rte_iavf_xstats_name_off {
 	unsigned int offset;
 };
 
+#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
-	{"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
-	{"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
-	{"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
-	{"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
-	{"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
+	{"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
+	{"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
+	{"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
+	{"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
+	{"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
 	{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
 		rx_unknown_protocol)},
-	{"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
-	{"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
-	{"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
-	{"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
-	{"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
-	{"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
+	{"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
+	{"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
+	{"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
+	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
+	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
+	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+
+	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
+	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
+	{"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
+	{"inline_ipsec_crypto_ierrors_sad_lookup",
+			_OFF_OF(ips_stats.ierrors.sad_miss)},
+	{"inline_ipsec_crypto_ierrors_not_processed",
+			_OFF_OF(ips_stats.ierrors.not_processed)},
+	{"inline_ipsec_crypto_ierrors_icv_fail",
+			_OFF_OF(ips_stats.ierrors.icv_check)},
+	{"inline_ipsec_crypto_ierrors_length",
+			_OFF_OF(ips_stats.ierrors.ipsec_length)},
+	{"inline_ipsec_crypto_ierrors_misc",
+			_OFF_OF(ips_stats.ierrors.misc)},
 };
+#undef _OFF_OF
 
 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
 		sizeof(rte_iavf_stats_strings[0]))
@@ -176,7 +193,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.stats_reset                = iavf_dev_stats_reset,
 	.xstats_get                 = iavf_dev_xstats_get,
 	.xstats_get_names           = iavf_dev_xstats_get_names,
-	.xstats_reset               = iavf_dev_stats_reset,
+	.xstats_reset               = iavf_dev_xstats_reset,
 	.promiscuous_enable         = iavf_dev_promiscuous_enable,
 	.promiscuous_disable        = iavf_dev_promiscuous_disable,
 	.allmulticast_enable        = iavf_dev_allmulticast_enable,
@@ -1543,7 +1560,7 @@ iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
 static void
 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
 {
-	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
 
 	iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
 	iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
@@ -1605,7 +1622,18 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev)
 		return ret;
 
 	/* set stats offset base on current values */
-	vsi->eth_stats_offset = *pstats;
+	vsi->eth_stats_offset.eth_stats = *pstats;
+
+	return 0;
+}
+
+static int
+iavf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_dev_stats_reset(dev);
+	memset(&vf->vsi.eth_stats_offset, 0, sizeof(struct iavf_eth_xstats));
 
 	return 0;
 }
@@ -1625,6 +1653,27 @@ static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return IAVF_NB_XSTATS;
 }
 
+static void
+iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
+		struct iavf_ipsec_crypto_stats *ips)
+{
+	uint16_t idx;
+	for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
+		struct iavf_rx_queue *rxq;
+		struct iavf_ipsec_crypto_stats *stats;
+		rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
+		stats = &rxq->stats.ipsec_crypto;
+		ips->icount += stats->icount;
+		ips->ibytes += stats->ibytes;
+		ips->ierrors.count += stats->ierrors.count;
+		ips->ierrors.sad_miss += stats->ierrors.sad_miss;
+		ips->ierrors.not_processed += stats->ierrors.not_processed;
+		ips->ierrors.icv_check += stats->ierrors.icv_check;
+		ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
+		ips->ierrors.misc += stats->ierrors.misc;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1635,6 +1684,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_vsi *vsi = &vf->vsi;
 	struct virtchnl_eth_stats *pstats = NULL;
+	struct iavf_eth_xstats iavf_xtats = {0};
 
 	if (n < IAVF_NB_XSTATS)
 		return IAVF_NB_XSTATS;
@@ -1647,11 +1697,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 		return 0;
 
 	iavf_update_stats(vsi, pstats);
+	iavf_xtats.eth_stats = *pstats;
+
+	if (iavf_ipsec_crypto_supported(adapter))
+		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
-		xstats[i].value = *(uint64_t *)(((char *)pstats) +
+		xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
 			rte_iavf_stats_strings[i].offset);
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index e009387aff..18bf8f4921 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -165,18 +165,6 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
-struct iavf_ipsec_crypto_stats {
-	uint64_t icount;
-	uint64_t ibytes;
-	struct {
-		uint64_t count;
-		uint64_t sad_miss;
-		uint64_t not_processed;
-		uint64_t icv_check;
-		uint64_t ipsec_length;
-		uint64_t misc;
-	} ierrors;
-};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v3 6/6] net/iavf: add watchdog for VFLR
  2021-09-20 13:51 ` [dpdk-dev] [PATCH v3 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (4 preceding siblings ...)
  2021-09-20 13:52   ` [dpdk-dev] [PATCH v3 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-09-20 13:52   ` Radu Nicolau
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-09-20 13:52 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add a watchdog to the iAVF PMD which monitors the VFLR register. If
the device is not already in reset and a VF reset in progress is
detected, notify the user through a callback and enter the reset
state. If the device is already in reset, poll for completion of the
reset.
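
Note, as an illustration only (not part of this patch): an application
that wants to react to the VFLR notification raised by this watchdog
can register for the existing RTE_ETH_EVENT_INTR_RESET event, along
these lines (the recovery policy itself is application specific):

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* Example callback invoked when the PMD reports a VF reset. */
static int
vf_reset_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		  void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	printf("port %u: VF reset detected by PMD watchdog\n", port_id);
	/* typically: stop the port, wait for reset completion, reconfigure */
	return 0;
}

static void
register_vf_reset_handler(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
				      vf_reset_event_cb, NULL);
}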

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h        |  6 +++
 drivers/net/iavf/iavf_ethdev.c | 97 ++++++++++++++++++++++++++++++++++
 2 files changed, 103 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index d5f574b4b3..4481d2e134 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -212,6 +212,12 @@ struct iavf_info {
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
+	struct {
+		uint8_t enabled:1;
+		uint64_t period_us;
+	} watchdog;
+	/** iAVF watchdog configuration */
+
 	/* Event from pf */
 	bool dev_closed;
 	bool link_up;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index aad6a28585..d02aa9c1c5 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -24,6 +24,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_alarm.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
@@ -239,6 +240,94 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+
+static int
+iavf_vfr_inprogress(struct iavf_hw *hw)
+{
+	int inprogress = 0;
+
+	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
+		VIRTCHNL_VFR_INPROGRESS)
+		inprogress = 1;
+
+	if (inprogress)
+		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
+
+	return inprogress;
+}
+
+static void
+iavf_dev_watchdog(void *cb_arg)
+{
+	struct iavf_adapter *adapter = cb_arg;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	int vfr_inprogress = 0, rc = 0;
+
+	/* check if watchdog has been disabled since last call */
+	if (!adapter->vf.watchdog.enabled)
+		return;
+
+	/* If in reset then poll vfr_inprogress register for completion */
+	if (adapter->vf.vf_reset) {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (!vfr_inprogress) {
+			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+				adapter->eth_dev->data->name);
+			adapter->vf.vf_reset = false;
+		}
+	/* If not in reset then poll vfr_inprogress register for VFLR event */
+	} else {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (vfr_inprogress) {
+			PMD_DRV_LOG(INFO,
+				"VF \"%s\" reset event has been detected by watchdog",
+				adapter->eth_dev->data->name);
+
+			/* enter reset state with VFLR event */
+			adapter->vf.vf_reset = true;
+
+			rte_eth_dev_callback_process(adapter->eth_dev,
+				RTE_ETH_EVENT_INTR_RESET, NULL);
+		}
+	}
+
+	/* re-alarm watchdog */
+	rc = rte_eal_alarm_set(adapter->vf.watchdog.period_us,
+			&iavf_dev_watchdog, cb_arg);
+
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed to reset device watchdog alarm for VF \"%s\"",
+			adapter->eth_dev->data->name);
+}
+
+static void
+iavf_dev_watchdog_enable(struct iavf_adapter *adapter, uint64_t period_us)
+{
+	int rc;
+
+	PMD_DRV_LOG(INFO, "Enabling device watchdog");
+
+	adapter->vf.watchdog.enabled = 1;
+	adapter->vf.watchdog.period_us = period_us;
+
+	rc = rte_eal_alarm_set(adapter->vf.watchdog.period_us,
+			&iavf_dev_watchdog, (void *)adapter);
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed to enable device watchdog");
+}
+
+static void
+iavf_dev_watchdog_disable(struct iavf_adapter *adapter)
+{
+	PMD_DRV_LOG(INFO, "Disabling device watchdog");
+
+	adapter->vf.watchdog.enabled = 0;
+	adapter->vf.watchdog.period_us = 0;
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
@@ -2448,6 +2537,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	iavf_default_rss_disable(adapter);
 
+
+	/* Start device watchdog, set polling period to 500us */
+	iavf_dev_watchdog_enable(adapter, 500);
+
+
 	return 0;
 
 flow_init_err:
@@ -2527,6 +2621,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
 		vf->vf_reset = false;
 
+	/* disable watchdog */
+	iavf_dev_watchdog_disable(adapter);
+
 	return ret;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v4 0/6] iavf: add iAVF IPsec inline crypto support
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (5 preceding siblings ...)
  2021-09-20 13:51 ` [dpdk-dev] [PATCH v3 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-01  9:51 ` Radu Nicolau
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 1/6] common/iavf: " Radu Nicolau
                     ` (5 more replies)
  2021-10-06  9:28 ` [dpdk-dev] [PATCH v5 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (9 subsequent siblings)
  16 siblings, 6 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-01  9:51 UTC (permalink / raw)
  Cc: dev, declan.doherty, abhijit.sinha, jingjing.wu, qi.z.zhang,
	beilei.xing, bruce.richardson, konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload for
ESP over UDP, and in conjunction with TSO for UDP and TCP flows.
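
For context, a rough sketch of how an application could configure such
an inline crypto SA through the existing rte_security API is shown
below. This is not part of this series; all values are illustrative
only, and the supported algorithms and options are advertised by the
PMD capabilities:

#include <rte_crypto_sym.h>
#include <rte_security.h>

/* Illustrative only: an outbound ESP tunnel (IPv4) SA using AES-GCM.
 * The conf would be passed to rte_security_session_create() on the
 * security context returned by rte_eth_dev_get_sec_ctx(port_id). */
static struct rte_crypto_sym_xform aead_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.key = { .data = NULL /* app-supplied key */, .length = 16 },
		.iv = { .offset = 0, .length = 8 },
		.digest_length = 16,
	},
};

static const struct rte_security_session_conf sa_conf = {
	.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
	.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
	.ipsec = {
		.spi = 0x1000,	/* example SPI */
		.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
		.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
		.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
		.tunnel = {
			.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
			/* .ipv4.src_ip / .ipv4.dst_ip set by the application */
		},
	},
	.crypto_xform = &aead_xform,
};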

Radu Nicolau (6):
  common/iavf: add iAVF IPsec inline crypto support
  net/iavf: rework tx path
  net/iavf: add support for asynchronous virt channel messages
  net/iavf: add iAVF IPsec inline crypto support
  net/iavf: add xstats support for inline IPsec crypto
  net/iavf: add watchdog for VFLR

 drivers/common/iavf/iavf_type.h               |  215 +-
 drivers/common/iavf/virtchnl.h                |   17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
 drivers/net/iavf/iavf.h                       |   53 +-
 drivers/net/iavf/iavf_ethdev.c                |  222 +-
 drivers/net/iavf/iavf_generic_flow.c          |   16 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1904 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |   96 +
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  709 ++++--
 drivers/net/iavf/iavf_rxtx.h                  |   91 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  166 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 17 files changed, 4123 insertions(+), 321 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

-- 
v2: small updates and fixes in the flow related section
v3: split the huge patch and address feedback
v4: small changes due to dependencies changes

2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v4 1/6] common/iavf: add iAVF IPsec inline crypto support
  2021-10-01  9:51 ` [dpdk-dev] [PATCH v4 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-01  9:51   ` Radu Nicolau
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 2/6] net/iavf: rework tx path Radu Nicolau
                     ` (4 subsequent siblings)
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-01  9:51 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/common/iavf/iavf_type.h             | 215 +++++++-
 drivers/common/iavf/virtchnl.h              |  17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h | 553 ++++++++++++++++++++
 3 files changed, 775 insertions(+), 10 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 73dfb47e70..1f8f8ae5fd 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -709,11 +709,29 @@ enum iavf_rx_prog_status_desc_error_bits {
 #define IAVF_FOUR_BIT_MASK	0xF
 #define IAVF_EIGHTEEN_BIT_MASK	0x3FFFF
 
-/* TX Descriptor */
+/* TX Data Descriptor */
 struct iavf_tx_desc {
-	__le64 buffer_addr; /* Address of descriptor's data buf */
-	__le64 cmd_type_offset_bsz;
-};
+	union {
+		struct {
+			__le64 buffer_addr; /* Addr of descriptor's data buf */
+			__le64 cmd_type_offset_bsz;
+		};
+		struct {
+			__le64 qw0; /**< data buffer address */
+			__le64 qw1; /**< dtyp, cmd, offset, buf_sz and l2tag1 */
+		};
+		struct {
+			__le64 buffer_addr;	/**< Data buffer address */
+			__le64 type:4;		/**< Descriptor type */
+			__le64 cmd:12;		/**< Command field */
+			__le64 offset_l2len:7;	/**< L2 header length */
+			__le64 offset_l3len:7;	/**< L3 header length */
+			__le64 offset_l4len:4;	/**< L4 header length */
+			__le64 buffer_sz:14;	/**< Data buffer size */
+			__le64 l2tag1:16;	/**< L2 Tag 1 value */
+		} debug __rte_packed;
+	};
+} __rte_packed;
 
 #define IAVF_TXD_QW1_DTYPE_SHIFT	0
 #define IAVF_TXD_QW1_DTYPE_MASK		(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
@@ -723,6 +741,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
@@ -734,7 +753,7 @@ enum iavf_tx_desc_dtype_value {
 #define IAVF_TXD_QW1_CMD_SHIFT	4
 #define IAVF_TXD_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_QW1_CMD_SHIFT)
 
-enum iavf_tx_desc_cmd_bits {
+enum iavf_tx_data_desc_cmd_bits {
 	IAVF_TX_DESC_CMD_EOP			= 0x0001,
 	IAVF_TX_DESC_CMD_RS			= 0x0002,
 	IAVF_TX_DESC_CMD_ICRC			= 0x0004,
@@ -778,18 +797,79 @@ enum iavf_tx_desc_length_fields {
 #define IAVF_TXD_QW1_L2TAG1_SHIFT	48
 #define IAVF_TXD_QW1_L2TAG1_MASK	(0xFFFFULL << IAVF_TXD_QW1_L2TAG1_SHIFT)
 
+#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
+#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
+#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
+#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
+	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
+	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT		(48)
+#define IAVF_TXD_DATA_QW1_L2TAG1_MASK		\
+	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
+
 /* Context descriptors */
 struct iavf_tx_context_desc {
+	union {
+		struct {
 	__le32 tunneling_params;
 	__le16 l2tag2;
 	__le16 rsvd;
 	__le64 type_cmd_tso_mss;
 };
-
-#define IAVF_TXD_CTX_QW1_DTYPE_SHIFT	0
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le32 tunneling;
+			__le16 l2tag2;
+			__le16 rsvd0;
+			__le64 type:4;
+			__le64 cmd:7;
+			__le64 ipsec:7;
+			__le64 rsvd1:12;
+			__le64 tlen_tsyn:18;
+			__le64 rsvd2:2;
+			__le64 mss_target_vsi:14;
+		} debug __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_TXD_CTX_QW1_DTYPE_SHIFT	(0)
 #define IAVF_TXD_CTX_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_CTX_QW1_DTYPE_SHIFT)
 
-#define IAVF_TXD_CTX_QW1_CMD_SHIFT	4
+#define IAVF_TXD_CTX_QW1_CMD_SHIFT	(4)
 #define IAVF_TXD_CTX_QW1_CMD_MASK	(0xFFFFUL << IAVF_TXD_CTX_QW1_CMD_SHIFT)
 
 enum iavf_tx_ctx_desc_cmd_bits {
@@ -804,6 +884,63 @@ enum iavf_tx_ctx_desc_cmd_bits {
 	IAVF_TX_CTX_DESC_SWPE		= 0x40
 };
 
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
+	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
+	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT		(30)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK		\
+	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK		\
+	(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT		(50)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK		\
+	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT		(0)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_external_ip_type {
+	IAVF_TX_CTX_DESC_EIPT_NONE,
+	IAVF_TX_CTX_DESC_EIPT_IPV6,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM			(32)
+#define IAVF_TXD_CTX_QW0_L2TAG2_MASK			(0xFFFFUL)
+
 struct iavf_nop_desc {
 	__le64 rsvd;
 	__le64 dtype_cmd;
@@ -911,6 +1048,68 @@ enum iavf_tx_ctx_desc_eipt_offload {
 #define IAVF_TXD_CTX_QW0_L4T_CS_SHIFT	23
 #define IAVF_TXD_CTX_QW0_L4T_CS_MASK	BIT_ULL(IAVF_TXD_CTX_QW0_L4T_CS_SHIFT)
 
+
+struct iavf_tx_ipsec_desc {
+	union {
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le16 l4payload_length;
+			__le32 esn;
+			__le16 trailer_length;
+			u8 type:4;
+			u8 rsv:1;
+			u8 udp:1;
+			u8 ivlen:2;
+			u8 next_header;
+			__le16 ipv6_ext_hdr_length;
+			__le32 said;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
+			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
+
+/* Initialization Vector Length type */
+enum iavf_ipsec_iv_len {
+	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
+	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
+	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
+	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
+};
+
 /* Statistics collected by each port, VSI, VEB, and S-channel */
 struct iavf_eth_stats {
 	u64 rx_bytes;			/* gorc */
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 83f51d889f..5cc326c035 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -38,6 +38,8 @@
  * value in current and future projects
  */
 
+#include "virtchnl_inline_ipsec.h"
+
 /* Error Codes */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
@@ -133,7 +135,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, and 37 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
 	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
 	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
@@ -225,6 +228,8 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 		return "VIRTCHNL_OP_DCF_CMD_DESC";
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
@@ -385,7 +390,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
-	/* BIT(8) is reserved */
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
@@ -2290,6 +2295,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..1e9134501e
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all valid, if certain field is invalid, set 1 for all bits */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* vf record the capability of crypto from the virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF pass virtchnl_ipsec_cap to PF
+ * and PF return capability of ipsec from virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF send this SA configuration to PF using virtchnl;
+ * PF create SA as configuration and PF driver will return
+ * an unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-reply window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF send configuration of index of SA to PF
+ * PF will update SA according to configuration
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF send configuration of index of SA to PF
+ * PF will destroy SA according to configuration
+ * flag bitmap indicate all SA or just selected SA will
+ * be destroyed
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF send this SA configuration to PF using virtchnl;
+ * PF read SA and will return configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v4 2/6] net/iavf: rework tx path
  2021-10-01  9:51 ` [dpdk-dev] [PATCH v4 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 1/6] common/iavf: " Radu Nicolau
@ 2021-10-01  9:51   ` Radu Nicolau
  2021-10-04  1:24     ` Wu, Jingjing
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
                     ` (3 subsequent siblings)
  5 siblings, 1 reply; 128+ messages in thread
From: Radu Nicolau @ 2021-10-01  9:51 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Rework the TX path and TX descriptor usage in order to
allow for better use of offload flags and to facilitate enabling of
the inline crypto offload feature.
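
For illustration only (not part of this patch): the reworked path
derives the context and data descriptor fields from the standard mbuf
offload metadata, so a TSO packet handed to the PMD would typically be
prepared along these lines (header sizes and MSS are illustrative):

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>

/* Hypothetical helper: offload metadata for an IPv4/TCP TSO packet.
 * PKT_TX_TCP_SEG implies the TCP checksum offload. */
static void
prepare_tso_mbuf(struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct rte_ether_hdr);	/* 14 */
	m->l3_len = sizeof(struct rte_ipv4_hdr);	/* 20 */
	m->l4_len = sizeof(struct rte_tcp_hdr);		/* 20 */
	m->tso_segsz = 1448;				/* MSS */
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
}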

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c         | 536 +++++++++++++++------------
 drivers/net/iavf/iavf_rxtx.h         |   9 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c |  10 +-
 3 files changed, 319 insertions(+), 236 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 6de8ad3fe3..d2cb6d59bc 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1048,27 +1048,31 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 
 static inline void
 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
-			  volatile union iavf_rx_flex_desc *rxdp,
-			  uint8_t rx_flags)
+			  volatile union iavf_rx_flex_desc *rxdp)
 {
-	uint16_t vlan_tci = 0;
-
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
-	    rte_le_to_cpu_64(rxdp->wb.status_error0) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+		(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+		mb->vlan_tci =
+			rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	} else {
+		mb->vlan_tci = 0;
+	}
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
-	    rte_le_to_cpu_16(rxdp->wb.status_error1) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
-#endif
-
-	if (vlan_tci) {
-		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		mb->vlan_tci = vlan_tci;
+	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
+		mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+				PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+		mb->vlan_tci_outer = mb->vlan_tci;
+		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
+	} else {
+		mb->vlan_tci_outer = 0;
 	}
+#endif
 }
 
 /* Translate the rx descriptor status and error fields to pkt flags */
@@ -1388,7 +1392,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->ol_flags = 0;
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1530,7 +1534,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = 0;
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1768,7 +1772,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
-			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
+			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2038,7 +2042,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
 
 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-	if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
+	if ((txd[desc_to_clean_to].qw1 &
 			rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
 			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
 		PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
@@ -2054,7 +2058,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
 					last_desc_cleaned);
 
-	txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
+	txd[desc_to_clean_to].qw1 = 0;
 
 	txq->last_desc_cleaned = desc_to_clean_to;
 	txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
@@ -2062,190 +2066,296 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 	return 0;
 }
 
-/* Check if the context descriptor is needed for TX offloading */
+
+
+static inline void
+iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
+{
+	uint64_t cmd = 0;
+
+	/* TSO enabled */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;
+
+	/* Time Sync - Currently not supported */
+
+	/* Outer L2 TAG 2 Insertion - Currently not supported */
+	/* Inner L2 TAG 2 Insertion - Currently not supported */
+
+	*field |= cmd;
+}
+
+static inline void
+iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
+		const struct rte_mbuf *m)
+{
+	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
+	uint64_t eip_len = 0;
+	uint64_t eip_noinc = 0;
+	/* Default - IP_ID is increment in each segment of LSO */
+
+	switch (m->ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6 |
+			PKT_TX_OUTER_IP_CKSUM)) {
+	case PKT_TX_OUTER_IPV4:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV6:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	}
+
+	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
+		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
+		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
+}
+
 static inline uint16_t
-iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
+iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
+	struct rte_mbuf *m)
 {
-	if (flags & PKT_TX_TCP_SEG)
-		return 1;
-	if (flags & PKT_TX_VLAN_PKT &&
-	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		return 1;
-	return 0;
+	uint64_t segmentation_field = 0;
+	uint64_t total_length = 0;
+
+	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		total_length -= m->outer_l3_len;
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+	if (!m->l4_len || !m->tso_segsz)
+		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
+			 m->l4_len, m->tso_segsz);
+	if (m->tso_segsz < 88)
+		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
+			m->tso_segsz, 88);
+#endif
+	segmentation_field =
+		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
+				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
+		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
+				IAVF_TXD_CTX_QW1_MSS_MASK);
+
+	*field |= segmentation_field;
+
+	return total_length;
+}
+
+static inline void
+iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
+	struct rte_mbuf *m, uint16_t *tlen)
+{
+	/* fill descriptor type field */
+	desc->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
+
+	/* fill command field */
+	iavf_fill_ctx_desc_cmd_field(&desc->qw1, m);
+
+	/* fill segmentation field */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc->qw1,
+				m);
+	}
+
+	/* fill tunnelling field */
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field(&desc->qw0, m);
+	else
+		desc->qw0 = 0;
+
+	desc->qw0 = rte_cpu_to_le_64(desc->qw0);
+	desc->qw1 = rte_cpu_to_le_64(desc->qw1);
 }
 
+
 static inline void
-iavf_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union iavf_tx_offload tx_offload)
+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
+		struct rte_mbuf *m)
 {
+	uint64_t command = 0;
+	uint64_t offset = 0;
+	uint64_t l2tag1 = 0;
+
+	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
+
+	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
+
+	/* Descriptor based VLAN insertion */
+	if (m->ol_flags & PKT_TX_VLAN_PKT) {
+		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
+		l2tag1 |= m->vlan_tci;
+	}
+
 	/* Set MACLEN */
-	*td_offset |= (tx_offload.l2_len >> 1) <<
-		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & PKT_TX_IP_CKSUM) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV4) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV6) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	}
-
-	if (ol_flags & PKT_TX_TCP_SEG) {
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
+	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L3 checksum offloading inner */
+	if (m->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV4) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV6) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	}
+
+	if (m->ol_flags & PKT_TX_TCP_SEG) {
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (m->l4_len >> 2) <<
 			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		return;
 	}
 
 	/* Enable L4 checksum offloads */
-	switch (ol_flags & PKT_TX_L4_MASK) {
+	switch (m->ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_TCP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_SCTP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_UDP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	}
+
+	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
+		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
+		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
+		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
+		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
-/* set TSO context descriptor
- * support IP -> L4 and IP -> IP -> L4
- */
-static inline uint64_t
-iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
+static inline void
+iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t value)
 {
-	uint64_t ctx_desc = 0;
-	uint32_t cd_cmd, hdr_len, cd_tso_len;
-
-	if (!tx_offload.l4_len) {
-		PMD_TX_LOG(DEBUG, "L4 length set to 0");
-		return ctx_desc;
+	*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 	}
 
-	hdr_len = tx_offload.l2_len +
-		  tx_offload.l3_len +
-		  tx_offload.l4_len;
+static inline void
+iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
+	struct rte_mbuf *m, uint64_t desc_template,
+	uint16_t tlen, uint16_t ipseclen)
+{
+	uint32_t hdrlen = m->l2_len;
+	uint32_t bufsz = 0;
+
+	/* fill data descriptor qw1 from template */
+	desc->qw1 = desc_template;
+
+	/* set data buffer address */
+	desc->qw0 = rte_mbuf_data_iova(m);
+
+	/* calculate data buffer size less set header lengths */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			hdrlen += m->outer_l3_len;
 
-	cd_cmd = IAVF_TX_CTX_DESC_TSO;
-	cd_tso_len = mbuf->pkt_len - hdr_len;
-	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
-		     ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-		     ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+		if (m->ol_flags & PKT_TX_L4_MASK)
+			hdrlen += m->l3_len + m->l4_len;
+		else
+			hdrlen += m->l3_len;
+
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			hdrlen += ipseclen;
 
-	return ctx_desc;
+		bufsz = hdrlen + tlen;
+	} else {
+		bufsz = m->data_len;
 }
 
-/* Construct the tx flags */
-static inline uint64_t
-iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
-	       uint32_t td_tag)
-{
-	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
-				((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
-				((uint64_t)td_offset <<
-				 IAVF_TXD_QW1_OFFSET_SHIFT) |
-				((uint64_t)size  <<
-				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
-				((uint64_t)td_tag  <<
-				 IAVF_TXD_QW1_L2TAG1_SHIFT));
+	/* set data buffer size */
+	desc->qw1 |= (((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
+
+	desc->qw0 = rte_cpu_to_le_64(desc->qw0);
+	desc->qw1 = rte_cpu_to_le_64(desc->qw1);
 }
 
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	volatile struct iavf_tx_desc *txd;
-	volatile struct iavf_tx_desc *txr;
-	struct iavf_tx_queue *txq;
-	struct iavf_tx_entry *sw_ring;
+	struct iavf_tx_queue *txq = tx_queue;
+	volatile struct iavf_tx_desc *txr = txq->tx_ring;
+	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
-	struct rte_mbuf *tx_pkt;
-	struct rte_mbuf *m_seg;
-	uint16_t tx_id;
-	uint16_t nb_tx;
-	uint32_t td_cmd;
-	uint32_t td_offset;
-	uint32_t td_tag;
-	uint64_t ol_flags;
-	uint16_t nb_used;
-	uint16_t nb_ctx;
-	uint16_t tx_last;
-	uint16_t slen;
-	uint64_t buf_dma_addr;
-	uint16_t cd_l2tag2 = 0;
-	union iavf_tx_offload tx_offload = {0};
-
-	txq = tx_queue;
-	sw_ring = txq->sw_ring;
-	txr = txq->tx_ring;
-	tx_id = txq->tx_tail;
-	txe = &sw_ring[tx_id];
+	struct rte_mbuf *mb, *mb_seg;
+	uint16_t desc_idx, desc_idx_last;
+	uint16_t idx;
+
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_free < txq->free_thresh)
-		(void)iavf_xmit_cleanup(txq);
+		iavf_xmit_cleanup(txq);
+
+	desc_idx = txq->tx_tail;
+	txe = &txe_ring[desc_idx];
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+		iavf_dump_tx_entry_ring(txq);
+		iavf_dump_tx_desc_ring(txq);
+#endif
+
 
-	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-		td_cmd = 0;
-		td_tag = 0;
-		td_offset = 0;
+	for (idx = 0; idx < nb_pkts; idx++) {
+		volatile struct iavf_tx_desc *ddesc;
+		uint16_t nb_desc_ctx;
+		uint16_t nb_desc_data, nb_desc_required;
+		uint16_t tlen = 0, ipseclen = 0;
+		uint64_t ddesc_template = 0;
+		uint64_t ddesc_cmd = 0;
+
+		mb = tx_pkts[idx];
 
-		tx_pkt = *tx_pkts++;
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
-		ol_flags = tx_pkt->ol_flags;
-		tx_offload.l2_len = tx_pkt->l2_len;
-		tx_offload.l3_len = tx_pkt->l3_len;
-		tx_offload.l4_len = tx_pkt->l4_len;
-		tx_offload.tso_segsz = tx_pkt->tso_segsz;
-		/* Calculate the number of context descriptors needed. */
-		nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
+		nb_desc_data = mb->nb_segs;
+		nb_desc_ctx = !!(mb->ol_flags &
+			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
 
-		/* The number of descriptors that must be allocated for
+		/**
+		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
-		 * packet plus 1 context descriptor if needed.
+		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
-		tx_last = (uint16_t)(tx_id + nb_used - 1);
+		nb_desc_required = nb_desc_data + nb_desc_ctx;
+
+		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
-		/* Circular ring */
-		if (tx_last >= txq->nb_tx_desc)
-			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+		/* wrap descriptor ring */
+		if (desc_idx_last >= txq->nb_tx_desc)
+			desc_idx_last =
+				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
 
-		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
-			   " tx_first=%u tx_last=%u",
-			   txq->port_id, txq->queue_id, tx_id, tx_last);
+		PMD_TX_LOG(DEBUG,
+			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
+			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
 
-		if (nb_used > txq->nb_free) {
+		if (nb_desc_required > txq->nb_free) {
 			if (iavf_xmit_cleanup(txq)) {
-				if (nb_tx == 0)
+				if (idx == 0)
 					return 0;
 				goto end_of_tx;
 			}
-			if (unlikely(nb_used > txq->rs_thresh)) {
-				while (nb_used > txq->nb_free) {
+			if (unlikely(nb_desc_required > txq->rs_thresh)) {
+				while (nb_desc_required > txq->nb_free) {
 					if (iavf_xmit_cleanup(txq)) {
-						if (nb_tx == 0)
+						if (idx == 0)
 							return 0;
 						goto end_of_tx;
 					}
@@ -2253,122 +2363,94 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 		}
 
-		/* Descriptor based VLAN insertion */
-		if (ol_flags & PKT_TX_VLAN_PKT &&
-		    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
-			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
-			td_tag = tx_pkt->vlan_tci;
-		}
-
-		/* According to datasheet, the bit2 is reserved and must be
-		 * set to 1.
-		 */
-		td_cmd |= 0x04;
-
-		/* Enable checksum offloading */
-		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
-			iavf_txd_enable_checksum(ol_flags, &td_cmd,
-						&td_offset, tx_offload);
+		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
 
-		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
-			uint64_t cd_type_cmd_tso_mss =
-				IAVF_TX_DESC_DTYPE_CONTEXT;
-			volatile struct iavf_tx_context_desc *ctx_txd =
+		if (nb_desc_ctx) {
+			volatile struct iavf_tx_context_desc *ctx_desc =
 				(volatile struct iavf_tx_context_desc *)
-							&txr[tx_id];
+					&txr[desc_idx];
 
 			/* clear QW0 or the previous writeback value
 			 * may impact next write
 			 */
-			*(volatile uint64_t *)ctx_txd = 0;
+			*(volatile uint64_t *)ctx_desc = 0;
 
-			txn = &sw_ring[txe->next_id];
+			txn = &txe_ring[txe->next_id];
 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
 			if (txe->mbuf) {
 				rte_pktmbuf_free_seg(txe->mbuf);
 				txe->mbuf = NULL;
 			}
 
-			/* TSO enabled */
-			if (ol_flags & PKT_TX_TCP_SEG)
-				cd_type_cmd_tso_mss |=
-					iavf_set_tso_ctx(tx_pkt, tx_offload);
+			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
-			if (ol_flags & PKT_TX_VLAN_PKT &&
-			   txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
-				cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
-					<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-				cd_l2tag2 = tx_pkt->vlan_tci;
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
 			}
 
-			ctx_txd->type_cmd_tso_mss =
-				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
-			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
 
-			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
-		}
 
-		m_seg = tx_pkt;
+		mb_seg = mb;
+
 		do {
-			txd = &txr[tx_id];
-			txn = &sw_ring[txe->next_id];
+			ddesc = (volatile struct iavf_tx_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
 			if (txe->mbuf)
 				rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = m_seg;
-
-			/* Setup TX Descriptor */
-			slen = m_seg->data_len;
-			buf_dma_addr = rte_mbuf_data_iova(m_seg);
-			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
-			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
-								  td_offset,
-								  slen,
-								  td_tag);
-
-			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
+
+			txe->mbuf = mb_seg;
+			iavf_fill_data_desc(ddesc, mb_seg,
+					ddesc_template, tlen, ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->next;
-		} while (m_seg);
+			mb_seg = mb_seg->next;
+		} while (mb_seg);
 
 		/* The last packet data descriptor needs End Of Packet (EOP) */
-		td_cmd |= IAVF_TX_DESC_CMD_EOP;
-		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
-		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
+
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
 
 		if (txq->nb_used >= txq->rs_thresh) {
 			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
 				   "%4u (port=%d queue=%d)",
-				   tx_last, txq->port_id, txq->queue_id);
+				   desc_idx_last, txq->port_id, txq->queue_id);
 
-			td_cmd |= IAVF_TX_DESC_CMD_RS;
+			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
 
 			/* Update txq RS bit counters */
 			txq->nb_used = 0;
 		}
 
-		txd->cmd_type_offset_bsz |=
-			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
-					 IAVF_TXD_QW1_CMD_SHIFT);
-		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+		ddesc->qw1 |= rte_cpu_to_le_64(ddesc_cmd <<
+				IAVF_TXD_DATA_QW1_CMD_SHIFT);
+
+		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
 	}
 
 end_of_tx:
 	rte_wmb();
 
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   txq->port_id, txq->queue_id, tx_id, nb_tx);
+		   txq->port_id, txq->queue_id, desc_idx, idx);
 
-	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
-	txq->tx_tail = tx_id;
+	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
+	txq->tx_tail = desc_idx;
 
-	return nb_tx;
+	return idx;
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
@@ -2869,7 +2951,7 @@ iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
 			desc -= txq->nb_tx_desc;
 	}
 
-	status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+	status = &txq->tx_ring[desc].qw1;
 	mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
 	expect = rte_cpu_to_le_64(
 		 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index e210b913d6..1bc47614ea 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -555,9 +555,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	const volatile struct iavf_tx_desc *tx_desc = desc;
 	enum iavf_tx_desc_dtype_value type;
 
-	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
-		tx_desc->cmd_type_offset_bsz &
-		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+
+	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(tx_desc->qw1 &
+			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
 	switch (type) {
 	case IAVF_TX_DESC_DTYPE_DATA:
 		name = "Tx_data_desc";
@@ -571,8 +571,7 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	}
 
 	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
-	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
-	       tx_desc->cmd_type_offset_bsz);
+		txq->queue_id, name, tx_id, tx_desc->qw0, tx_desc->qw1);
 }
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index ee1e905525..288c5ca1f1 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -363,10 +363,12 @@ static inline void
 flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
 		     const uint32_t *type_table)
 {
-	const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+	const __m128i ptype_mask = _mm_set_epi16(
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
+
 	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
 	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
 	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v4 3/6] net/iavf: add support for asynchronous virt channel messages
  2021-10-01  9:51 ` [dpdk-dev] [PATCH v4 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 1/6] common/iavf: " Radu Nicolau
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 2/6] net/iavf: rework tx path Radu Nicolau
@ 2021-10-01  9:51   ` Radu Nicolau
  2021-10-04  1:34     ` Wu, Jingjing
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (2 subsequent siblings)
  5 siblings, 1 reply; 128+ messages in thread
From: Radu Nicolau @ 2021-10-01  9:51 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for asynchronous virtual channel messages, specifically for
inline IPsec messages.
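
The mechanism can be summarised as a pending-completion counter: a
synchronous command expects a single response, while an asynchronous
inline-IPsec command expects two completions (the immediate adminq
acknowledgement plus the later asynchronous response), and the waiter is
released only when the counter reaches zero. A minimal single-threaded
model of this idea follows (not the driver code, which uses
rte_atomic32_* operations on vf->pend_cmd_count):

#include <stdio.h>

struct ex_vf_state {
	int pend_cmd_count;
	int cmd_done;
};

static void
ex_set_cmd(struct ex_vf_state *vf, int async)
{
	/* async commands wait for ack + response, sync commands for one reply */
	vf->pend_cmd_count = async ? 2 : 1;
	vf->cmd_done = 0;
}

static void
ex_handle_completion(struct ex_vf_state *vf)
{
	if (--vf->pend_cmd_count == 0)
		vf->cmd_done = 1;	/* corresponds to _notify_cmd() */
}

int
main(void)
{
	struct ex_vf_state vf;

	ex_set_cmd(&vf, 1);		/* async inline-IPsec request */
	ex_handle_completion(&vf);	/* adminq acknowledgement */
	printf("done after ack: %d\n", vf.cmd_done);
	ex_handle_completion(&vf);	/* asynchronous response */
	printf("done after async response: %d\n", vf.cmd_done);
	return 0;
}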

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h       |  16 ++++
 drivers/net/iavf/iavf_vchnl.c | 137 +++++++++++++++++++++-------------
 2 files changed, 101 insertions(+), 52 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index b3bd078111..8c7f7c0bed 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -189,6 +189,7 @@ struct iavf_info {
 	uint64_t supported_rxdid;
 	uint8_t *proto_xtr; /* proto xtr type for all queues */
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+	rte_atomic32_t pend_cmd_count;
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
@@ -340,9 +341,24 @@ _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
+	rte_atomic32_set(&vf->pend_cmd_count, 1);
+
 	return !ret;
 }
 
+/* Check there is pending cmd in execution. If none, set new command. */
+static inline int
+_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+	int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+	if (!ret)
+		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+	rte_atomic32_set(&vf->pend_cmd_count, 2);
+
+	return !ret;
+}
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 7f86050df3..5c62443999 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -23,8 +23,8 @@
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
-#define MAX_TRY_TIMES 200
-#define ASQ_DELAY_MS  10
+#define MAX_TRY_TIMES 2000
+#define ASQ_DELAY_MS  1
 
 static uint32_t
 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
@@ -143,7 +143,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 }
 
 static int
-iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args,
+	int async)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
@@ -155,8 +156,14 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (vf->vf_reset)
 		return -EIO;
 
-	if (_atomic_set_cmd(vf, args->ops))
-		return -1;
+
+	if (async) {
+		if (_atomic_set_async_response_cmd(vf, args->ops))
+			return -1;
+	} else {
+		if (_atomic_set_cmd(vf, args->ops))
+			return -1;
+	}
 
 	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
@@ -252,9 +259,11 @@ static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
@@ -330,18 +339,40 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
 		case iavf_aqc_opc_send_msg_to_vf:
 			if (msg_opc == VIRTCHNL_OP_EVENT) {
 				iavf_handle_pf_event_msg(dev, info.msg_buf,
-							info.msg_len);
+						info.msg_len);
 			} else {
+				/* check for inline IPsec events */
+				struct inline_ipsec_msg *imsg =
+					(struct inline_ipsec_msg *)info.msg_buf;
+				struct rte_eth_event_ipsec_desc desc;
+				if (msg_opc == VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
+					&& imsg->ipsec_opcode ==
+						INLINE_IPSEC_OP_EVENT) {
+					struct virtchnl_ipsec_event *ev =
+							imsg->ipsec_data.event;
+					desc.subtype =
+						RTE_ETH_EVENT_IPSEC_UNKNOWN;
+					desc.metadata = ev->ipsec_event_data;
+					rte_eth_dev_callback_process(dev,
+							RTE_ETH_EVENT_IPSEC,
+							&desc);
+					return;
+				}
+
 				/* read message and it's expected one */
-				if (msg_opc == vf->pend_cmd)
-					_notify_cmd(vf, msg_ret);
-				else
-					PMD_DRV_LOG(ERR, "command mismatch,"
-						    "expect %u, get %u",
-						    vf->pend_cmd, msg_opc);
+				if (msg_opc == vf->pend_cmd) {
+					rte_atomic32_dec(&vf->pend_cmd_count);
+					if (rte_atomic32_read(
+						&vf->pend_cmd_count) == 0)
+						_notify_cmd(vf, msg_ret);
+				} else {
+					PMD_DRV_LOG(ERR,
+					"command mismatch, expect %u, get %u",
+						vf->pend_cmd, msg_opc);
+				}
 				PMD_DRV_LOG(DEBUG,
-					    "adminq response is received,"
-					    " opcode = %d", msg_opc);
+				"adminq response is received, opcode = %d",
+						msg_opc);
 			}
 			break;
 		default:
@@ -365,7 +396,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_ENABLE_VLAN_STRIPPING");
@@ -386,7 +417,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_DISABLE_VLAN_STRIPPING");
@@ -415,7 +446,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
 		return err;
@@ -468,12 +499,13 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
-		VIRTCHNL_VF_OFFLOAD_QOS;
+		VIRTCHNL_VF_OFFLOAD_QOS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -518,7 +550,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
@@ -562,7 +594,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_strip);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
@@ -602,7 +634,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_insert);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
@@ -645,7 +677,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(vlan_filter);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");
@@ -666,7 +698,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
@@ -697,7 +729,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES");
@@ -725,7 +757,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES");
@@ -758,7 +790,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
@@ -800,7 +832,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
@@ -844,7 +876,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
@@ -890,7 +922,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
@@ -922,7 +954,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
@@ -954,7 +986,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
@@ -1046,7 +1078,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
@@ -1087,7 +1119,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
 
@@ -1128,7 +1160,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
 
@@ -1188,7 +1220,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		args.in_args_size = len;
 		args.out_buffer = vf->aq_resp;
 		args.out_size = IAVF_AQ_BUF_SZ;
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		if (err)
 			PMD_DRV_LOG(ERR, "fail to execute command %s",
 				    add ? "OP_ADD_ETHER_ADDRESS" :
@@ -1215,7 +1247,7 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		*pstats = NULL;
@@ -1250,7 +1282,7 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1290,7 +1322,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
@@ -1317,7 +1349,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
@@ -1344,7 +1376,7 @@ iavf_fdir_add(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
 		return err;
@@ -1404,7 +1436,7 @@ iavf_fdir_del(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
 		return err;
@@ -1451,7 +1483,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
 		return err;
@@ -1492,7 +1524,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of %s",
@@ -1515,7 +1547,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
@@ -1541,7 +1573,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_SET_RSS_HENA");
@@ -1562,7 +1594,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1595,7 +1627,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_TC_MAP");
@@ -1640,7 +1672,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 		i * sizeof(struct virtchnl_ether_addr);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
@@ -1685,7 +1717,7 @@ iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
 	 * before iavf_read_msg_from_pf.
 	 */
 	rte_intr_disable(&pci_dev->intr_handle);
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	rte_intr_enable(&pci_dev->intr_handle);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
@@ -1721,7 +1753,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
 		return err;
@@ -1734,3 +1766,4 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 
 	return 0;
 }
+
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v4 4/6] net/iavf: add iAVF IPsec inline crypto support
  2021-10-01  9:51 ` [dpdk-dev] [PATCH v4 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (2 preceding siblings ...)
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
@ 2021-10-01  9:51   ` Radu Nicolau
  2021-10-04  1:50     ` Wu, Jingjing
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
  5 siblings, 1 reply; 128+ messages in thread
From: Radu Nicolau @ 2021-10-01  9:51 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Ray Kinsella
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload for
ESP over UDP, and in conjunction with TSO for UDP and TCP flows.
Implement support for rte_security packet metadata.

Add definitions for the IPsec descriptors, and extend the offload
support in the data and context descriptors accordingly.

Add support to the virtual channel mailbox for IPsec Crypto request
operations. IPsec Crypto requests receive an initial acknowledgement
from the physical function driver confirming receipt of the request,
followed by an asynchronous response with the success/failure of the
request, including any response data.

Add enhanced descriptor debugging.

Refactor the scalar TX burst function to support integration of the
offload.
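
For context, a minimal sketch of the per-packet setup an application
would be expected to do on the transmit side, assuming the port reports
DEV_TX_OFFLOAD_SECURITY and an inline-crypto IPsec rte_security session
has already been created for it (session creation and error handling are
omitted; the helper name and header sizes are illustrative only):

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_security.h>

static int
ex_prepare_inline_esp_tso(uint16_t port_id,
	struct rte_security_session *ipsec_sess, struct rte_mbuf *m,
	uint16_t mss)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);

	if (ctx == NULL)
		return -1;

	/* header lengths of the plaintext packet handed to the PMD */
	m->l2_len = RTE_ETHER_HDR_LEN;
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);
	m->tso_segsz = mss;

	/* request inline IPsec processing together with TSO and checksums */
	m->ol_flags |= PKT_TX_SEC_OFFLOAD | PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
			PKT_TX_TCP_SEG;

	/* attach the driver's per-session metadata to the packet */
	return rte_security_set_pkt_metadata(ctx, ipsec_sess, m, NULL);
}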

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h                       |   10 +
 drivers/net/iavf/iavf_ethdev.c                |   41 +-
 drivers/net/iavf/iavf_generic_flow.c          |   16 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1904 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |   96 +
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  201 +-
 drivers/net/iavf/iavf_rxtx.h                  |   94 +-
 drivers/net/iavf/iavf_vchnl.c                 |   29 +
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 13 files changed, 2762 insertions(+), 21 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 8c7f7c0bed..934ef48278 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -217,6 +217,7 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+	struct iavf_parser_list ipsec_crypto_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
@@ -239,6 +240,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_IPV6_FLOW,
 	IAVF_PROTO_XTR_TCP,
 	IAVF_PROTO_XTR_IP_OFFSET,
+	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
 	IAVF_PROTO_XTR_MAX,
 };
 
@@ -250,11 +252,14 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 };
 
+struct iavf_security_ctx;
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
 	struct rte_eth_dev *eth_dev;
 	struct iavf_info vf;
+	struct iavf_security_ctx *security_ctx;
 
 	bool rx_bulk_alloc_allowed;
 	/* For vector PMD */
@@ -273,6 +278,8 @@ struct iavf_adapter {
 	(&((struct iavf_adapter *)adapter)->vf)
 #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct iavf_adapter *)adapter)->hw)
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+	(((struct iavf_adapter *)adapter)->security_ctx)
 
 /* IAVF_VSI_TO */
 #define IAVF_VSI_TO_HW(vsi) \
@@ -415,5 +422,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			uint16_t size);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index c131461517..294be1a022 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -29,6 +29,7 @@
 #include "iavf_rxtx.h"
 #include "iavf_generic_flow.h"
 #include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
@@ -70,6 +71,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
 	[IAVF_PROTO_XTR_IP_OFFSET] = {
 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
 		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+		.param = {
+		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+		.ol_flag =
+			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
 };
 
 static int iavf_dev_configure(struct rte_eth_dev *dev);
@@ -922,6 +928,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	/* free iAVF security device context all related resources */
+	iavf_security_ctx_destroy(adapter);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -931,7 +940,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
@@ -974,6 +985,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
 
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+	}
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -1730,6 +1746,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
 		{ "tcp",       IAVF_PROTO_XTR_TCP       },
 		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
 	};
 	uint32_t i;
 
@@ -1738,8 +1755,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 			return xtr_type_map[i].type;
 	}
 
-	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
-		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+			"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
 
 	return -1;
 }
@@ -2357,6 +2374,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		goto flow_init_err;
 	}
 
+	/** Check if the IPsec Crypto offload is supported and create
+	 *  security_ctx if it is.
+	 */
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		/* Initialize security_ctx only for primary process*/
+		ret = iavf_security_ctx_create(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+			return ret;
+		}
+
+		ret = iavf_security_init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
+			return ret;
+		}
+	}
+
 	iavf_default_rss_disable(adapter);
 
 	return 0;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index 1fe270fb22..d85e82a950 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1635,6 +1635,7 @@ iavf_flow_init(struct iavf_adapter *ad)
 	TAILQ_INIT(&vf->flow_list);
 	TAILQ_INIT(&vf->rss_parser_list);
 	TAILQ_INIT(&vf->dist_parser_list);
+	TAILQ_INIT(&vf->ipsec_crypto_parser_list);
 	rte_spinlock_init(&vf->flow_ops_lock);
 
 	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
@@ -1709,6 +1710,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
+		list = &vf->ipsec_crypto_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
@@ -2018,6 +2022,14 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 
 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
 				    actions, error);
+	if (*engine)
+		return 0;
+
+	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
+			pattern, actions, error);
+	if (*engine)
+		return 0;
+
 
 	if (!*engine) {
 		rte_flow_error_set(error, EINVAL,
@@ -2064,6 +2076,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	/* Special case for inline crypto egress flows */
+	if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY)
+		goto free_flow;
+
 	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
 			&engine, iavf_parse_engine_create, error);
 	if (ret < 0) {
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 4794d1fb80..a471c0331f 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -449,6 +449,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 /* engine types. */
 enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_NONE = 0,
+	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
 	IAVF_FLOW_ENGINE_MAX,
@@ -462,6 +463,7 @@ enum iavf_flow_engine_type {
  */
 enum iavf_flow_classification_stage {
 	IAVF_FLOW_STAGE_NONE = 0,
+	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
 	IAVF_FLOW_STAGE_RSS,
 	IAVF_FLOW_STAGE_DISTRIBUTOR,
 	IAVF_FLOW_STAGE_MAX,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
new file mode 100644
index 0000000000..9635b41679
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -0,0 +1,1904 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+#include <rte_security_driver.h>
+#include <rte_security.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_log.h"
+#include "iavf_generic_flow.h"
+
+#include "iavf_ipsec_crypto.h"
+#include "iavf_ipsec_crypto_capabilities.h"
+
+/**
+ * iAVF IPsec Crypto Security Context
+ */
+struct iavf_security_ctx {
+	struct iavf_adapter *adapter;
+	int pkt_md_offset;
+	struct rte_cryptodev_capabilities *crypto_capabilities;
+};
+
+/**
+ * iAVF IPsec Crypto Security Session Parameters
+ */
+struct iavf_security_session {
+	struct iavf_adapter *adapter;
+
+	enum rte_security_ipsec_sa_mode mode;
+	enum rte_security_ipsec_tunnel_type type;
+	enum rte_security_ipsec_sa_direction direction;
+
+	struct {
+		uint32_t spi; /* Security Parameter Index */
+		uint32_t hw_idx; /* SA Index in hardware table */
+	} sa;
+
+	struct {
+		uint8_t enabled :1;
+		union {
+			uint64_t value;
+			struct {
+				uint32_t hi;
+				uint32_t low;
+			};
+		};
+	} esn;
+
+	struct {
+		uint8_t enabled :1;
+	} udp_encap;
+
+	size_t iv_sz;
+	size_t icv_sz;
+	size_t block_sz;
+
+	struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
+};
+/**
+ *  IV Length field in IPsec Tx Desc uses the following encoding:
+ *
+ *  0B - 0
+ *  4B - 1
+ *  8B - 2
+ *  16B - 3
+ *
+ * but we also need the IV Length for TSO to correctly calculate the total
+ * header length so placing it in the upper 6-bits here for easier retrieval.
+ */
+static inline uint8_t
+calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
+{
+	uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
+
+	switch (iv_sz) {
+	case 4:
+		iv_length = IAVF_IPSEC_IV_LEN_DW;
+		break;
+	case 8:
+		iv_length = IAVF_IPSEC_IV_LEN_DDW;
+		break;
+	case 16:
+		iv_length = IAVF_IPSEC_IV_LEN_QDW;
+		break;
+	}
+
+	return (iv_sz << 2) | iv_length;
+}
+
+
+static unsigned int
+iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
+{
+	return sizeof(struct iavf_security_session);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_capability(struct iavf_security_ctx *iavf_sctx,
+	uint32_t algo, uint32_t type)
+{
+	const struct rte_cryptodev_capabilities *capability;
+	int i = 0;
+
+	capability = &iavf_sctx->crypto_capabilities[i];
+
+	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+			capability->sym.xform_type == type &&
+			capability->sym.cipher.algo == algo)
+			return &capability->sym;
+		/** try next capability */
+		capability = &iavf_crypto_capabilities[i++];
+	}
+
+	return NULL;
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_auth_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
+}
+static const struct rte_cryptodev_symmetric_capability *
+get_aead_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
+}
+
+static uint16_t
+get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->auth.block_size;
+}
+
+static uint8_t
+calc_context_desc_cipherblock_sz(size_t len)
+{
+	switch (len) {
+	case 8:
+		return 0x2;
+	case 16:
+		return 0x3;
+	default:
+		return 0x0;
+	}
+}
+
+static int
+valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
+{
+	if (len < min || len > max)
+		return false;
+
+	if (increment == 0)
+		return true;
+
+	if ((len - min) % increment)
+		return false;
+
+	/* make sure it fits in the key array */
+	if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
+		return false;
+
+	return true;
+}
+
+static int
+valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_auth_xform *auth)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, auth->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(auth->key.length,
+		capability->auth.key_size.min,
+		capability->auth.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_cipher_xform *cipher)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, cipher->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(cipher->key.length,
+		capability->cipher.key_size.min,
+		capability->cipher.key_size.max,
+		capability->cipher.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_aead_xform *aead)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, aead->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(aead->key.length,
+		capability->aead.key_size.min,
+		capability->aead.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
+	struct rte_security_session_conf *conf)
+{
+	/** validate security action/protocol selection */
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
+		PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate IPsec protocol selection */
+	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate selected options */
+	if (conf->ipsec.options.copy_dscp ||
+		conf->ipsec.options.copy_flabel ||
+		conf->ipsec.options.copy_df ||
+		conf->ipsec.options.dec_ttl ||
+		conf->ipsec.options.ecn ||
+		conf->ipsec.options.stats) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+		return -EINVAL;
+	}
+
+	/**
+	 * Validate crypto xforms parameters.
+	 *
+ * AEAD transforms can be used for either inbound or outbound IPsec SAs,
+	 * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
+	 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
+	 */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_auth_xform(iavf_sctx,
+				&conf->crypto_xform->next->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->next->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_aead_xform *aead, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AEAD;
+
+	switch (aead->algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		cfg->algo_type = VIRTCHNL_AES_CCM; break;
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		cfg->algo_type = VIRTCHNL_AES_GCM; break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
+		break;
+	}
+
+	cfg->key_len = aead->key.length;
+	cfg->iv_len = aead->iv.length;
+	cfg->digest_len = aead->digest_length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, aead->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_cipher_xform *cipher, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_CIPHER;
+
+	switch (cipher->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cfg->algo_type = VIRTCHNL_AES_CBC; break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cfg->algo_type = VIRTCHNL_3DES_CBC; break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cfg->algo_type = VIRTCHNL_AES_CTR;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid cipher parameters");
+		break;
+	}
+
+	cfg->key_len = cipher->key.length;
+	cfg->iv_len = cipher->iv.length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
+}
+
+
+static void
+sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_auth_xform *auth, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AUTH;
+
+	switch (auth->algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+		cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		cfg->algo_type = VIRTCHNL_AES_CMAC; break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		cfg->algo_type = VIRTCHNL_AES_GMAC;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid auth parameters");
+		break;
+	}
+
+	cfg->key_len = auth->key.length;
+	cfg->iv_len = auth->iv.length;
+	cfg->digest_len = auth->digest_length;
+
+	memcpy(cfg->key_data, auth->key.data, cfg->key_len);
+}
+
+/**
+ * Send SA add virtual channel request to Inline IPsec driver.
+ *
+ * The Inline IPsec driver expects the SPI and destination IP address to be in
+ * host byte order, but the DPDK APIs use network byte order, therefore an
+ * htonl conversion of these parameters is needed.
+ */
+static uint32_t
+iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
+	struct rte_security_session_conf *conf)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	struct virtchnl_ipsec_sa_cfg *sa_cfg;
+	size_t request_len, response_len;
+
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg);
+
+	request = rte_malloc("iavf-sad-add-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
+	response = rte_malloc("iavf-sad-add-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set SA configuration params */
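+	/*
+	 * The payload is laid out immediately after the inline_ipsec_msg
+	 * header in the request buffer, hence the (request + 1) pointer
+	 * arithmetic.
+	 */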
+	sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
+
+	sa_cfg->spi = conf->ipsec.spi;
+	sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
+	sa_cfg->virtchnl_direction =
+		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+			VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
+
+	if (conf->ipsec.options.esn) {
+		sa_cfg->esn_enabled = 1;
+		sa_cfg->esn_hi = conf->ipsec.esn.hi;
+		sa_cfg->esn_low = conf->ipsec.esn.low;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sa_cfg->udp_encap_enabled = 1;
+
+	/* Set outer IP params */
+	if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
+
+		*((uint32_t *)sa_cfg->dst_addr) =
+			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
+	} else {
+		uint32_t *v6_dst_addr =
+			conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
+
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
+
+		((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
+		((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
+		((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
+		((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
+	}
+
+	/* set crypto params */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->aead, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->cipher, conf->ipsec.salt);
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->auth, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->auth, conf->ipsec.salt);
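+		/*
+		 * AES-GMAC is authentication-only, so no chained cipher
+		 * transform is configured for it; all other AUTH-first chains
+		 * also carry a cipher transform.
+		 */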
+		if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
+			sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->cipher, conf->ipsec.salt);
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sa_cfg_resp->sa_handle;
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static void
+set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
+	struct iavf_security_session *sess)
+{
+	template->sa_idx = sess->sa.hw_idx;
+
+	if (sess->udp_encap.enabled)
+		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
+
+	if (sess->esn.enabled)
+		template->ol_flags |= IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
+
+	template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
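+	/*
+	 * Pack the context descriptor IPsec parameters: the cipher block size
+	 * code in the low bits and the ICV length in 4-byte words shifted into
+	 * the upper bits.
+	 */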
+	template->ctx_desc_ipsec_params =
+			calc_context_desc_cipherblock_sz(sess->block_sz) |
+			((uint8_t)(sess->icv_sz >> 2) << 3);
+}
+
+static void
+set_session_parameter(struct iavf_security_ctx *iavf_sctx,
+	struct iavf_security_session *sess,
+	struct rte_security_session_conf *conf, uint32_t sa_idx)
+{
+	sess->adapter = iavf_sctx->adapter;
+
+	sess->mode = conf->ipsec.mode;
+	sess->direction = conf->ipsec.direction;
+
+	if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		sess->type = conf->ipsec.tunnel.type;
+
+	sess->sa.spi = conf->ipsec.spi;
+	sess->sa.hw_idx = sa_idx;
+
+	if (conf->ipsec.options.esn) {
+		sess->esn.enabled = 1;
+		sess->esn.value = conf->ipsec.esn.value;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sess->udp_encap.enabled = 1;
+
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sess->block_sz = get_aead_blocksize(iavf_sctx,
+			conf->crypto_xform->aead.algo);
+		sess->iv_sz = conf->crypto_xform->aead.iv.length;
+		sess->icv_sz = conf->crypto_xform->aead.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sess->block_sz = get_cipher_blocksize(iavf_sctx,
+			conf->crypto_xform->cipher.algo);
+		sess->iv_sz = conf->crypto_xform->cipher.iv.length;
+		sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			sess->block_sz = get_auth_blocksize(iavf_sctx,
+				RTE_CRYPTO_AUTH_AES_GMAC);
+			sess->iv_sz = conf->crypto_xform->auth.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		} else {
+			sess->block_sz = get_cipher_blocksize(iavf_sctx,
+				conf->crypto_xform->next->cipher.algo);
+			sess->iv_sz =
+				conf->crypto_xform->next->cipher.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		}
+	}
+
+	set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
+}
+
+/**
+ * Create IPsec Security Association for inline IPsec Crypto offload.
+ *
+ * 1. validate session configuration parameters
+ * 2. allocate session memory from mempool
+ * 3. add SA to hardware database
+ * 4. set session parameters
+ * 5. create packet metadata template for datapath
+ */
+static int
+iavf_ipsec_crypto_session_create(void *device,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *session,
+				 struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_session = NULL;
+	int sa_idx;
+	int ret = 0;
+
+	/* validate that all SA parameters are valid for device */
+	ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
+	if (ret)
+		return ret;
+
+	/* allocate session context */
+	if (rte_mempool_get(mempool, (void **)&iavf_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
+		return -ENOMEM;
+	}
+
+	/* add SA to hardware database */
+	sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
+	if (sa_idx < 0) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add SA (spi: %d, mode: %s, direction: %s)",
+			conf->ipsec.spi,
+			conf->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
+				"transport" : "tunnel",
+			conf->ipsec.direction ==
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+				"inbound" : "outbound");
+
+		rte_mempool_put(mempool, iavf_session);
+		return -EFAULT;
+	}
+
+	/* save data plane required session parameters */
+	set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
+
+	/* save to security session private data */
+	set_sec_session_private_data(session, iavf_session);
+
+	return 0;
+}
+
+/**
+ * Check if the IPsec crypto action is valid.
+ * The SPI must be non-zero and the SPI in the session must match the SPI
+ * value passed into the function.
+ *
+ * returns: 0 if the session is invalid or the SPI value equals zero
+ * returns: 1 if valid
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_session *sess = session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(sess == NULL || sess->adapter != adapter))
+		return false;
+
+	/* SPI value must be non-zero */
+	if (spi == 0)
+		return false;
+	/* Session SPI must match flow SPI */
+	else if (sess->sa.spi == spi) {
+		return true;
+		/**
+		 * TODO: We should add a way of tracking valid hw SA indices to
+		 * make validation less brittle
+		 */
+	}
+
+	return true;
+}
+
+
+/**
+ * Send virtual channel security policy add request to IES driver.
+ *
+ * The IES driver expects the SPI and destination IP address to be in host
+ * byte order, but the DPDK APIs use network byte order, therefore an htonl
+ * conversion of these parameters is needed.
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg);
+	request = rte_malloc("iavf-inbound-security-policy-add-request",
+				request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* ESP SPI */
+	request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
+
+	/* Destination IP  */
+	if (is_v4) {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
+		request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
+	} else {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+		request->ipsec_data.sp_cfg->dip[0] =
+				htonl(((uint32_t *)v6_dst_addr)[0]);
+		request->ipsec_data.sp_cfg->dip[1] =
+				htonl(((uint32_t *)v6_dst_addr)[1]);
+		request->ipsec_data.sp_cfg->dip[2] =
+				htonl(((uint32_t *)v6_dst_addr)[2]);
+		request->ipsec_data.sp_cfg->dip[3] =
+				htonl(((uint32_t *)v6_dst_addr)[3]);
+	}
+
+	request->ipsec_data.sp_cfg->drop = drop;
+
+	/** Traffic Class/Congestion Domain are not currently supported */
+	request->ipsec_data.sp_cfg->set_tc = 0;
+	request->ipsec_data.sp_cfg->cgd = 0;
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
+	response = rte_malloc("iavf-inbound-security-policy-add-response",
+				response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* send virtual channel request to add security policy to hardware */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sp_cfg_resp->rule_id;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_update);
+	request = rte_malloc("iavf-sa-update-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sa-update-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set request params */
+	request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
+	request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
+
+	/* send virtual channel request to update SA in hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_resp->resp;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_update(void *device,
+		struct rte_security_session *session,
+		struct rte_security_session_conf *conf)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int rc = 0;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* update esn hi 32-bits */
+	if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
+		/**
+		 * Update ESN in hardware for inbound SA. Store in
+		 * iavf_security_session for outbound SA for use
+		 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
+		 */
+		if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+			rc = iavf_ipsec_crypto_sa_update_esn(adapter,
+					iavf_sess);
+		else
+			iavf_sess->esn.hi = conf->ipsec.esn.hi;
+	}
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
+		struct rte_security_session *session __rte_unused,
+		struct rte_security_stats *stats __rte_unused)
+{
+	return -EOPNOTSUPP;
+}
+
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_destroy);
+	request = rte_malloc("iavf-sp-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sp-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set security policy params */
+	request->ipsec_data.sp_destroy->table_id = is_v4 ?
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+	request->ipsec_data.sp_destroy->rule_id = flow_id;
+
+	/* send virtual channel request to delete security policy from hardware */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		return response->ipsec_data.ipsec_status->status;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_destroy);
+
+	request = rte_malloc("iavf-sa-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+
+	response = rte_malloc("iavf-sa-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/**
+	 * SA delete supports deletion of 1-8 specified SAs; if the flag
+	 * field is zero, all SAs associated with the VF will be deleted.
+	 */
+	if (sess) {
+		request->ipsec_data.sa_destroy->flag = 0x1;
+		request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
+	} else {
+		request->ipsec_data.sa_destroy->flag = 0x0;
+	}
+
+	/* send virtual channel request to delete SA from hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+
+	/**
+	 * The delete status will be the same bitmask as the sa_destroy request
+	 * flag if the deletes were successful.
+	 */
+	if (request->ipsec_data.sa_destroy->flag !=
+			response->ipsec_data.ipsec_status->status)
+		rc = -EFAULT;
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+
+static int
+iavf_ipsec_crypto_session_destroy(void *device,
+		struct rte_security_session *session)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int ret;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
+	rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
+	return ret;
+}
+
+/**
+ * Get ESP trailer from packet as well as calculate the total ESP trailer
+ * length, which includes the padding, the ESP trailer footer and the ICV.
+ */
+static inline struct rte_esp_tail *
+iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t *esp_trailer_length)
+{
+	struct rte_esp_tail *esp_trailer;
+
+	uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
+	uint16_t offset = 0;
+
+	/**
+	 * The ICV will not be present in TSO packets as this is appended by
+	 * hardware during segment generation
+	 */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		length -= s->icv_sz;
+
+	*esp_trailer_length = length;
+
+	/**
+	 * Calculate offset in packet to ESP trailer header, this should be
+	 * total packet length less the size of the ESP trailer plus the ICV
+	 * length if it is present
+	 */
+	offset = rte_pktmbuf_pkt_len(m) - length;
+
+	if (m->nb_segs > 1) {
+		/* find the segment in which the ESP trailer is located */
+		while (m->data_len < offset) {
+			offset -= m->data_len;
+			m = m->next;
+		}
+	}
+
+	esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
+
+	*esp_trailer_length += esp_trailer->pad_len;
+
+	return esp_trailer;
+}
+
+
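+/**
+ * Compute the L4 payload length used in the packet metadata: the total packet
+ * length minus all outer/inner headers, the ESP header and IV, and the ESP
+ * trailer.
+ */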
+static inline uint16_t
+iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t esp_tlen)
+{
+	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
+	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
+	uint16_t ol4_len = 0;		/* UDP NATT */
+	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
+	uint16_t l4_len = 0;		/* TCP/UDP/SCTP hdrs */
+	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
+
+	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ol3_len = m->outer_l3_len;
+		/**<
+		 * application provided l3len assumed to include length of
+		 * ipv4/6 hdr + ext hdrs
+		 */
+
+	if (s->udp_encap.enabled)
+		ol4_len = sizeof(struct rte_udp_hdr);
+
+	l3_len = m->l3_len;
+	l4_len = m->l4_len;
+
+	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
+			esp_hlen + l3_len + l4_len + esp_tlen);
+}
+
+
+static int
+iavf_ipsec_crypto_pkt_metadata_set(void *device,
+			 struct rte_security_session *session,
+			 struct rte_mbuf *m, void *params)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_sess = session->sess_private_data;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
+	struct rte_esp_tail *esp_tail;
+	uint64_t *sqn = params;
+	uint16_t esp_trailer_length;
+
+	/* Check we have a valid session associated with this device */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* Get dynamic metadata location from mbuf */
+	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
+		struct iavf_ipsec_crypto_pkt_metadata *);
+
+	/* Set immutable metadata values from session template */
+	memcpy(md, &iavf_sess->pkt_metadata_template,
+		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
+
+	esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
+			&esp_trailer_length);
+
+	/* Set per packet mutable metadata values */
+	md->esp_trailer_len = esp_trailer_length;
+	md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
+				iavf_sess, esp_trailer_length);
+	md->next_proto = esp_tail->next_proto;
+
+	/* If Extended SN in use set the upper 32-bits in metadata */
+	if (iavf_sess->esn.enabled && sqn != NULL)
+		md->esn = (uint32_t)(*sqn >> 32);
+
+	return 0;
+}
+
+static int
+iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
+		struct virtchnl_ipsec_cap *capability)
+{
+	/* Perform pf-vf comms */
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg);
+
+	request = rte_malloc("iavf-device-capability-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_cap);
+	response = rte_malloc("iavf-device-capability-response",
+			response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* send virtual channel request to get device capabilities */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id) {
+		rc = -EFAULT;
+		goto update_cleanup;
+	}
+	memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+
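+/*
+ * Translation tables from virtchnl algorithm identifiers, as reported by the
+ * PF over the inline IPsec virtual channel, to the corresponding rte_crypto
+ * algorithm enums used to build the runtime capability list.
+ */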
+static const enum rte_crypto_auth_algorithm auth_maptbl[] = {
+	/* Hash Algorithm */
+	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
+	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
+	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
+	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
+	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
+	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
+	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
+	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
+	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
+	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+};
+
+static void
+update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
+		struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	capability->auth.algo = auth_maptbl[acap->algo_type];
+	capability->auth.block_size = acap->block_size;
+
+	capability->auth.key_size.min = acap->min_key_size;
+	capability->auth.key_size.max = acap->max_key_size;
+	capability->auth.key_size.increment = acap->inc_key_size;
+
+	capability->auth.digest_size.min = acap->min_digest_size;
+	capability->auth.digest_size.max = acap->max_digest_size;
+	capability->auth.digest_size.increment = acap->inc_digest_size;
+}
+
+static const enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
+	/* Cipher Algorithm */
+	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
+	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
+	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
+	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
+};
+
+
+static void
+update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+	capability->cipher.algo = cipher_maptbl[acap->algo_type];
+
+	capability->cipher.block_size = acap->block_size;
+
+	capability->cipher.key_size.min = acap->min_key_size;
+	capability->cipher.key_size.max = acap->max_key_size;
+	capability->cipher.key_size.increment = acap->inc_key_size;
+
+	capability->cipher.iv_size.min = acap->min_iv_size;
+	capability->cipher.iv_size.max = acap->max_iv_size;
+	capability->cipher.iv_size.increment = acap->inc_iv_size;
+}
+
+static const enum rte_crypto_aead_algorithm aead_maptbl[] = {
+	/* AEAD Algorithm */
+	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
+	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
+	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+};
+
+static void
+update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	capability->aead.algo = aead_maptbl[acap->algo_type];
+
+	capability->aead.block_size = acap->block_size;
+
+	capability->aead.key_size.min = acap->min_key_size;
+	capability->aead.key_size.max = acap->max_key_size;
+	capability->aead.key_size.increment = acap->inc_key_size;
+
+	capability->aead.aad_size.min = acap->min_aad_size;
+	capability->aead.aad_size.max = acap->max_aad_size;
+	capability->aead.aad_size.increment = acap->inc_aad_size;
+
+	capability->aead.iv_size.min = acap->min_iv_size;
+	capability->aead.iv_size.max = acap->max_iv_size;
+	capability->aead.iv_size.increment = acap->inc_iv_size;
+
+	capability->aead.digest_size.min = acap->min_digest_size;
+	capability->aead.digest_size.max = acap->max_digest_size;
+	capability->aead.digest_size.increment = acap->inc_digest_size;
+}
+
+
+/**
+ * Dynamically set crypto capabilities based on virtchannel IPsec
+ * capabilities structure.
+ */
+int
+iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
+{
+	struct rte_cryptodev_capabilities *capabilities;
+	int i, j, number_of_capabilities = 0, ci = 0;
+
+	/* Count the total number of crypto algorithms supported */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
+		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
+
+	/**
+	 * Allocate cryptodev capabilities structure for
+	 * *number_of_capabilities* items plus one item to null terminate the
+	 * array
+	 */
+	capabilities = rte_zmalloc("crypto_cap",
+		sizeof(struct rte_cryptodev_capabilities) *
+		(number_of_capabilities + 1), 0);
+	if (capabilities == NULL)
+		return -ENOMEM;
+
+	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
+
+	/**
+	 * Iterate over each virtchnl crypto capability by crypto type and
+	 * algorithm.
+	 */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
+		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
+			switch (vch_cap->cap[i].crypto_type) {
+			case VIRTCHNL_AUTH:
+				update_auth_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_CIPHER:
+				update_cipher_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_AEAD:
+				update_aead_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			default:
+				capabilities[ci].op =
+						RTE_CRYPTO_OP_TYPE_UNDEFINED;
+				break;
+			}
+		}
+	}
+
+	iavf_sctx->crypto_capabilities = capabilities;
+	return 0;
+}
+
+/**
+ * Get security capabilities for device
+ */
+static const struct rte_security_capability *
+iavf_ipsec_crypto_capabilities_get(void *device)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	unsigned int i;
+
+	static struct rte_security_capability iavf_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 }
+			},
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	/**
+	 * Update the security capabilities struct with the runtime discovered
+	 * crypto capabilities, except for the last element of the array, which
+	 * is the NULL termination.
+	 */
+	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
+			sizeof(iavf_security_capabilities[0])) - 1); i++) {
+		iavf_security_capabilities[i].crypto_capabilities =
+			iavf_sctx->crypto_capabilities;
+	}
+
+	return iavf_security_capabilities;
+}
+
+static struct rte_security_ops iavf_ipsec_crypto_ops = {
+	.session_get_size		= iavf_ipsec_crypto_session_size_get,
+	.session_create			= iavf_ipsec_crypto_session_create,
+	.session_update			= iavf_ipsec_crypto_session_update,
+	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
+	.session_destroy		= iavf_ipsec_crypto_session_destroy,
+	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
+	.get_userdata			= NULL,
+	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
+};
+
+int
+iavf_security_ctx_create(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx;
+
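+	/*
+	 * Two contexts are maintained: the generic rte_security_ctx attached
+	 * to the ethdev below, and the driver-private iavf_security_ctx
+	 * stored in adapter->security_ctx.
+	 */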
+	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
+	if (sctx == NULL)
+		return -ENOMEM;
+
+	sctx->device = adapter->eth_dev;
+	sctx->ops = &iavf_ipsec_crypto_ops;
+	sctx->sess_cnt = 0;
+
+	adapter->eth_dev->security_ctx = sctx;
+
+	if (adapter->security_ctx == NULL) {
+		adapter->security_ctx = rte_malloc("iavf_security_ctx",
+				sizeof(struct iavf_security_ctx), 0);
+		if (adapter->security_ctx == NULL)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int
+iavf_security_init(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct rte_mbuf_dynfield pkt_md_dynfield = {
+		.name = "iavf_ipsec_crypto_pkt_metadata",
+		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
+		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
+	};
+	struct virtchnl_ipsec_cap capabilities;
+	int rc;
+
+	iavf_sctx->adapter = adapter;
+
+	iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
+	if (iavf_sctx->pkt_md_offset < 0)
+		return iavf_sctx->pkt_md_offset;
+
+	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
+	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
+	if (rc)
+		return rc;
+
+	return iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
+			&capabilities);
+}
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	return iavf_sctx->pkt_md_offset;
+}
+
+int
+iavf_security_ctx_destroy(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx = adapter->eth_dev->security_ctx;
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	if (iavf_sctx == NULL)
+		return -ENODEV;
+
+	/* TODO: Add resources cleanup */
+
+	/* free and reset security data structures */
+	rte_free(iavf_sctx);
+	rte_free(sctx);
+
+	adapter->security_ctx = NULL;
+	adapter->eth_dev->security_ctx = NULL;
+
+	return 0;
+}
+
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
+{
+	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
+
+	/** Capability check for IPsec Crypto */
+	if (resources && (resources->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
+		return true;
+
+	return false;
+}
+
+
+#define IAVF_IPSEC_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
+enum iavf_ipsec_flow_pt_type {
+	IAVF_PATTERN_ESP = 1,
+	IAVF_PATTERN_AH,
+	IAVF_PATTERN_UDP_ESP,
+};
+enum iavf_ipsec_flow_pt_ip_ver {
+	IAVF_PATTERN_IPV4 = 1,
+	IAVF_PATTERN_IPV6,
+};
+
+#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
+#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
+#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
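+
+/*
+ * The pattern meta value packs the match type in the low nibble and the IP
+ * version in the high nibble, e.g. IAVF_PATTERN(IAVF_PATTERN_ESP,
+ * IAVF_PATTERN_IPV4) == 0x11.
+ */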
+
+static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
+	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
+};
+
+struct iavf_ipsec_flow_item {
+	uint64_t id;
+	uint8_t is_ipv4;
+	uint32_t spi;
+	struct rte_ether_hdr eth_hdr;
+	union {
+		struct rte_ipv4_hdr ipv4_hdr;
+		struct rte_ipv6_hdr ipv6_hdr;
+	};
+	struct rte_udp_hdr udp_hdr;
+};
+
+static void
+parse_eth_item(const struct rte_flow_item_eth *item,
+		struct rte_ether_hdr *eth)
+{
+	memcpy(eth->s_addr.addr_bytes,
+			item->src.addr_bytes, sizeof(eth->s_addr));
+	memcpy(eth->d_addr.addr_bytes,
+			item->dst.addr_bytes, sizeof(eth->d_addr));
+}
+
+static void
+parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
+		struct rte_ipv4_hdr *ipv4)
+{
+	ipv4->src_addr = item->hdr.src_addr;
+	ipv4->dst_addr = item->hdr.dst_addr;
+}
+
+static void
+parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
+		struct rte_ipv6_hdr *ipv6)
+{
+	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
+	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+}
+
+static void
+parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
+{
+	udp->dst_port = item->hdr.dst_port;
+	udp->src_port = item->hdr.src_port;
+}
+
+static int
+has_security_action(const struct rte_flow_action actions[],
+	const void **session)
+{
+	/* only {SECURITY; END} supported */
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
+		*session = actions[0].conf;
+		return true;
+	}
+	return false;
+}
+
+
+static struct iavf_ipsec_flow_item *
+iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t type)
+{
+	const void *session;
+	struct iavf_ipsec_flow_item
+		*ipsec_flow = rte_malloc("security-flow-rule",
+		sizeof(struct iavf_ipsec_flow_item), 0);
+	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
+	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
+
+	if (ipsec_flow == NULL)
+		return NULL;
+
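+	/*
+	 * Patterns were matched against iavf_ipsec_flow_pattern[], so the item
+	 * ordering is fixed: ETH / IPv4|IPv6 / ESP|AH, or ETH / IPv4|IPv6 /
+	 * UDP / ESP for NAT-T, which allows the items to be indexed directly
+	 * below.
+	 */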
+	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
+
+	if (pattern[0].spec)
+		parse_eth_item((const struct rte_flow_item_eth *)
+				pattern[0].spec, &ipsec_flow->eth_hdr);
+
+	switch (p_type) {
+	case IAVF_PATTERN_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[2].spec)->hdr.spi;
+		break;
+	case IAVF_PATTERN_AH:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_ah *)
+					pattern[2].spec)->spi;
+		break;
+	case IAVF_PATTERN_UDP_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		parse_udp_item((const struct rte_flow_item_udp *)
+				pattern[2].spec,
+			&ipsec_flow->udp_hdr);
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[3].spec)->hdr.spi;
+		break;
+	default:
+		goto flow_cleanup;
+	}
+
+
+	if (!has_security_action(actions, &session))
+		goto flow_cleanup;
+
+	if (!iavf_ipsec_crypto_action_valid(ethdev, session,
+			ipsec_flow->spi))
+		goto flow_cleanup;
+
+	return ipsec_flow;
+
+flow_cleanup:
+	rte_free(ipsec_flow);
+	return NULL;
+}
+
+
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser;
+
+static int
+iavf_ipsec_flow_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
+		parser = &iavf_ipsec_flow_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
+}
+
+static int
+iavf_ipsec_flow_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = meta;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	if (ipsec_flow->is_ipv4) {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			1,
+			ipsec_flow->ipv4_hdr.dst_addr,
+			NULL,
+			0);
+	} else {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			0,
+			0,
+			ipsec_flow->ipv6_hdr.dst_addr,
+			0);
+	}
+
+	if (ipsec_flow->id < 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Failed to add SA.");
+		return -rte_errno;
+	}
+
+	flow->rule = ipsec_flow;
+
+	return 0;
+}
+
+static int
+iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	iavf_ipsec_crypto_security_policy_delete(ad,
+			ipsec_flow->is_ipv4, ipsec_flow->id);
+	rte_free(ipsec_flow);
+	return 0;
+}
+
+static struct iavf_flow_engine iavf_ipsec_flow_engine = {
+	.init = iavf_ipsec_flow_init,
+	.uninit = iavf_ipsec_flow_uninit,
+	.create = iavf_ipsec_flow_create,
+	.destroy = iavf_ipsec_flow_destroy,
+	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
+};
+
+static int
+iavf_ipsec_flow_parse(struct iavf_adapter *ad,
+		       struct iavf_pattern_match_item *array,
+		       uint32_t array_len,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta,
+		       struct rte_flow_error *error)
+{
+	struct iavf_pattern_match_item *item = NULL;
+	int ret = -1;
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (item && item->meta) {
+		uint32_t type = (uint64_t)(item->meta);
+		struct iavf_ipsec_flow_item *fi =
+				iavf_ipsec_flow_item_parse(ad->eth_dev,
+						pattern, actions, type);
+		if (fi && meta) {
+			*meta = fi;
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser = {
+	.engine = &iavf_ipsec_flow_engine,
+	.array = iavf_ipsec_flow_pattern,
+	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
+	.parse_pattern_action = iavf_ipsec_flow_parse,
+	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
+};
+
+RTE_INIT(iavf_ipsec_flow_engine_register)
+{
+	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
+}
+
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
new file mode 100644
index 0000000000..d8d7d6649e
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_H_
+#define _IAVF_IPSEC_CRYPTO_H_
+
+#include <rte_security.h>
+
+#include "iavf.h"
+
+/* IPsec Crypto Packet Metadata offload flags */
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
+
+/**
+ * Packet metadata data structure used to hold parameters required by the iAVF
+ * transmit data path. Parameters are set per packet by calling the
+ * rte_security_set_pkt_metadata() API.
+ */
+struct iavf_ipsec_crypto_pkt_metadata {
+	uint32_t sa_idx;                /* SA hardware index (20b/4B) */
+
+	uint8_t ol_flags;		/* flags (1B) */
+	uint8_t len_iv;			/* IV length (2b/1B) */
+	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
+	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
+
+	uint16_t l4_payload_len;	/* L4 payload length */
+	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extension headers len (5b/1B) */
+	uint8_t next_proto;		/* Next Protocol (8b/1B) */
+
+	uint32_t esn;		        /* Extended Sequence Number (32b/4B) */
+} __rte_packed;
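+
+/**
+ * Illustrative usage sketch (not part of the driver): an application using
+ * the inline crypto offload would typically attach the security session and
+ * the ESN (if used) to each outbound mbuf before transmit, e.g.
+ *
+ *   rte_security_set_pkt_metadata(sec_ctx, sec_sess, mbuf, &sqn);
+ *
+ * sec_ctx, sec_sess and sqn are application-managed objects; the PMD then
+ * fills this metadata structure in the mbuf dynamic field from the session
+ * template.
+ */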
+
+/**
+ * Check whether inline IPsec Crypto offload is supported
+ */
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
+
+/**
+ * Create security context
+ */
+int iavf_security_ctx_create(struct iavf_adapter *adapter);
+
+/**
+ * Initialize security context and discover device crypto capabilities
+ */
+int iavf_security_init(struct iavf_adapter *adapter);
+
+/**
+ * Set security capabilities
+ */
+int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchl_capabilities);
+
+
+int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+/**
+ * Destroy security context
+ */
+int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
+
+/**
+ * Verify that the inline IPsec Crypto action is valid for this device
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi);
+
+/**
+ * Add inbound security policy rule to hardware
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop);
+
+/**
+ * Delete inbound security policy rule from hardware
+ */
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id);
+
+#endif /* _IAVF_IPSEC_CRYPTO_H_ */
diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
new file mode 100644
index 0000000000..70ce8dd638
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC MAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 },
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* ChaCha20-Poly1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 16,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	}
+};
+
+
+#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index d2cb6d59bc..3f8c0822b7 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -27,6 +27,7 @@
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
+#include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
@@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 uint8_t
 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
@@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
 		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
 		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
 		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
+				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
 	};
 
 	return flex_type < RTE_DIM(rxdid_map) ?
@@ -504,6 +508,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields =
 			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
+	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+		rxq->xtr_ol_flag =
+			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+		rxq->rxd_to_pkt_fields =
+			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
 	case IAVF_RXDID_COMMS_OVS_1:
 		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
 		break;
@@ -688,6 +698,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		       const struct rte_eth_txconf *tx_conf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
@@ -732,9 +744,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		struct virtchnl_vlan_supported_caps *insertion_support =
-			&vf->vlan_v2_caps.offloads.insertion_support;
+			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
 		uint32_t insertion_cap;
 
 		if (insertion_support->outer)
@@ -758,6 +770,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
+	if (iavf_ipsec_crypto_supported(adapter))
+		txq->ipsec_crypto_pkt_md_offset =
+			iavf_security_get_pkt_md_offset(adapter);
+
 	/* Allocate software ring */
 	txq->sw_ring =
 		rte_zmalloc_socket("iavf tx sw ring",
@@ -1075,6 +1091,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
 #endif
 }
 
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
+
+	mb->dynfield1[0] = desc->ipsec_said &
+			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
+}
+
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp,
+			  struct iavf_ipsec_crypto_stats *stats)
+{
+	uint16_t status1 = rte_le_to_cpu_64(rxdp->wb.status_error1);
+
+	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
+		uint16_t ipsec_status;
+
+		mb->ol_flags |= PKT_RX_SEC_OFFLOAD;
+
+		ipsec_status = status1 &
+			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
+
+
+		if (unlikely(ipsec_status !=
+			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
+			mb->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+
+			switch (ipsec_status) {
+			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
+				stats->ierrors.sad_miss++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
+				stats->ierrors.not_processed++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
+				stats->ierrors.icv_check++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
+				stats->ierrors.ipsec_length++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
+				stats->ierrors.misc++;
+				break;
+			}
+
+			stats->ierrors.count++;
+			return;
+		}
+
+		stats->icount++;
+		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
+
+		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
+			ipsec_status !=
+				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
+			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
+	}
+}
+
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
@@ -1393,6 +1473,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1535,6 +1617,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1773,6 +1857,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+				&rxq->stats.ipsec_crypto);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2085,6 +2171,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
 	*field |= cmd;
 }
 
+static inline void
+iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+{
+	uint64_t ipsec_field =
+		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+
+	*field |= ipsec_field;
+}
+
+
 static inline void
 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 		const struct rte_mbuf *m)
@@ -2117,15 +2215,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 
 static inline uint16_t
 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
-	struct rte_mbuf *m)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
 	uint64_t segmentation_field = 0;
 	uint64_t total_length = 0;
 
-	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD) {
+		total_length = ipsec_md->l4_payload_len;
+	} else {
+		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
-	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
-		total_length -= m->outer_l3_len;
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			total_length -= m->outer_l3_len;
+	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
 	if (!m->l4_len || !m->tso_segsz)
@@ -2148,7 +2250,8 @@ iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
 
 static inline void
 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
-	struct rte_mbuf *m, uint16_t *tlen)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
+	uint16_t *tlen)
 {
 	/* fill descriptor type field */
 	desc->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
@@ -2158,8 +2261,12 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 
 	/* fill segmentation field */
 	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		/* fill IPsec field */
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			iavf_fill_ctx_desc_ipsec_field(&desc->qw1, ipsec_md);
+
 		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc->qw1,
-				m);
+				m, ipsec_md);
 	}
 
 	/* fill tunnelling field */
@@ -2173,6 +2280,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 }
 
 
+static inline void
+iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
+	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
+{
+	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
+		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
+		((uint64_t)md->esp_trailer_len <<
+				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
+
+	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
+		((uint64_t)md->next_proto <<
+				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
+		((uint64_t)(md->len_iv & 0x3) <<
+				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
+		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+				1ULL : 0ULL) <<
+				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
+		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+
+	/**
+	 * TODO: Pre-calculate this in the Session initialization
+	 *
+	 * Calculate IPsec length required in data descriptor func when TSO
+	 * offload is enabled
+	 */
+	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
+			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+			sizeof(struct rte_udp_hdr) : 0);
+}
+
 static inline void
 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		struct rte_mbuf *m)
@@ -2286,6 +2425,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
 }
 
 
+static struct iavf_ipsec_crypto_pkt_metadata *
+iavf_ipsec_crypto_get_pkt_metdata(const struct iavf_tx_queue *txq,
+		struct rte_mbuf *m)
+{
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
+				struct iavf_ipsec_crypto_pkt_metadata *);
+
+	return NULL;
+}
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -2314,7 +2464,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	for (idx = 0; idx < nb_pkts; idx++) {
 		volatile struct iavf_tx_desc *ddesc;
-		uint16_t nb_desc_ctx;
+		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
+
+		uint16_t nb_desc_ctx, nb_desc_ipsec;
 		uint16_t nb_desc_data, nb_desc_required;
 		uint16_t tlen = 0, ipseclen = 0;
 		uint64_t ddesc_template = 0;
@@ -2324,16 +2476,23 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+		/**
+		 * Get metadata for ipsec crypto from mbuf dynamic fields if
+		 * security offload is specified.
+		 */
+		ipsec_md = iavf_ipsec_crypto_get_pkt_metdata(txq, mb);
+
 		nb_desc_data = mb->nb_segs;
 		nb_desc_ctx = !!(mb->ol_flags &
 			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
+		nb_desc_ipsec = !!(mb->ol_flags & PKT_TX_SEC_OFFLOAD);
 
 		/**
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx;
+		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2384,7 +2543,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txe->mbuf = NULL;
 			}
 
-			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
 			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
 			txe->last_id = desc_idx_last;
@@ -2392,7 +2551,27 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe = txn;
 			}
 
+		if (nb_desc_ipsec) {
+			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
+				(volatile struct iavf_tx_ipsec_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
+			if (txe->mbuf) {
+				rte_pktmbuf_free_seg(txe->mbuf);
+				txe->mbuf = NULL;
+			}
+
+			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
+		}
 
 		mb_seg = mb;
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 1bc47614ea..e009387aff 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -25,7 +25,8 @@
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		DEV_TX_OFFLOAD_TCP_TSO |		 \
+		DEV_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
 		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
@@ -47,7 +48,7 @@
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          88
+#define IAVF_MIN_TSO_MSS          256
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
@@ -65,7 +66,8 @@
 		PKT_TX_VLAN_PKT |		 \
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
-		PKT_TX_TCP_SEG)
+		PKT_TX_TCP_SEG |		 \
+		PKT_TX_SEC_OFFLOAD)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
@@ -163,6 +165,24 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_rx_queue_stats {
+	uint64_t reserved;
+	struct iavf_ipsec_crypto_stats ipsec_crypto;
+};
+
 /* Structure associated with each Rx queue. */
 struct iavf_rx_queue {
 	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
@@ -211,6 +231,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 				/* handle flexible descriptor by RXDID */
+	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
 };
 
@@ -245,6 +266,7 @@ struct iavf_tx_queue {
 	uint64_t offloads;
 	uint16_t next_dd;              /* next to set RS, for VPMD */
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
+	uint16_t ipsec_crypto_pkt_md_offset;
 
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
@@ -347,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
 	} flex_ts;
 };
 
+/* Rx Flex Descriptor
+ * RxDID Profile ID 24 Inline IPsec
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: Inline IPsec SAID lower 16-bits
+ * Flex-field 5: Inline IPsec SAID upper 16-bits
+ */
+struct iavf_32b_rx_flex_desc_comms_ipsec {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	__le32 ipsec_said;
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -366,6 +422,7 @@ enum iavf_rxdid {
 	IAVF_RXDID_COMMS_AUX_TCP	= 21,
 	IAVF_RXDID_COMMS_OVS_1		= 22,
 	IAVF_RXDID_COMMS_OVS_2		= 23,
+	IAVF_RXDID_COMMS_IPSEC_CRYPTO	= 24,
 	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
 	IAVF_RXDID_LAST			= 63,
 };
@@ -393,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
 
 enum iavf_rx_flex_desc_status_error_1_bits {
 	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
 	/* [10:6] reserved */
 	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
 	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
@@ -405,6 +466,24 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
+
+enum iavf_rx_flex_desc_ipsec_crypto_status {
+	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
+	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
+	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
+	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
+	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
+	/* Reserved */
+	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
+};
+
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)
+
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
@@ -565,6 +644,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	case IAVF_TX_DESC_DTYPE_CONTEXT:
 		name = "Tx_context_desc";
 		break;
+	case IAVF_TX_DESC_DTYPE_IPSEC:
+		name = "Tx_IPsec_desc";
+		break;
 	default:
 		name = "unknown_desc";
 		break;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 5c62443999..d99b03c8b2 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1767,3 +1767,32 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	return 0;
 }
 
+
+
+int
+iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
+	args.in_args = msg;
+	args.in_args_size = msg_len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 1);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+				"OP_INLINE_IPSEC_CRYPTO");
+		return err;
+	}
+
+	memcpy(resp_msg, args.out_buffer, resp_msg_len);
+
+	return 0;
+}
+
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index f2010a8337..385770b043 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -10,7 +10,7 @@ endif
 cflags += ['-Wno-strict-aliasing']
 
 includes += include_directories('../../common/iavf')
-deps += ['common_iavf']
+deps += ['common_iavf', 'security', 'cryptodev']
 
 sources = files(
         'iavf_ethdev.c',
@@ -20,6 +20,7 @@ sources = files(
         'iavf_fdir.c',
         'iavf_hash.c',
         'iavf_tm.c',
+        'iavf_ipsec_crypto.c',
 )
 
 if arch_subdir == 'x86'
diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
index 3a045040f1..7426eb9be3 100644
--- a/drivers/net/iavf/rte_pmd_iavf.h
+++ b/drivers/net/iavf/rte_pmd_iavf.h
@@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 /**
  * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map
index f3efe756cf..97f0f87311 100644
--- a/drivers/net/iavf/version.map
+++ b/drivers/net/iavf/version.map
@@ -13,4 +13,7 @@ EXPERIMENTAL {
 	rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+	# added in 21.11
+	rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread
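
For context on how the capabilities advertised above are consumed: below is a
minimal, illustrative sketch (not taken from this patch set) of creating an
inline-crypto ESP session on the iAVF ethdev security context, with AEAD
parameters chosen to fall inside the AES GCM capability entry (32-byte key,
16-byte digest, 8-byte IV). The helper name, the SPI value and the 4-argument
rte_security_session_create() signature (as used around the DPDK 21.11
timeframe) are assumptions, not code from the driver.

#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_security.h>
#include <rte_crypto_sym.h>

static uint8_t aead_key[32]; /* filled in by the application */

/* Hypothetical helper: create an egress ESP transport SA for inline crypto */
static struct rte_security_session *
create_inline_esp_session(uint16_t port_id, struct rte_mempool *sess_mp,
		struct rte_mempool *sess_priv_mp)
{
	struct rte_security_ctx *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);

	/* AEAD transform within the AES GCM capability ranges above */
	struct rte_crypto_sym_xform aead_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = aead_key, .length = 32 },
			.iv = { .offset = 0, .length = 8 },
			.digest_length = 16,
			.aad_length = 0,
		},
	};

	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1234,	/* illustrative value */
			.salt = 0,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
		},
		.crypto_xform = &aead_xform,
	};

	if (sec_ctx == NULL)
		return NULL;

	return rte_security_session_create(sec_ctx, &conf,
			sess_mp, sess_priv_mp);
}

On transmit the application attaches the session to each mbuf with
rte_security_set_pkt_metadata() and sets PKT_TX_SEC_OFFLOAD; on receive,
PKT_RX_SEC_OFFLOAD and PKT_RX_SEC_OFFLOAD_FAILED in ol_flags report the
inline processing result, as handled in iavf_flex_rxd_to_ipsec_crypto_status()
in the diff above.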

* [dpdk-dev] [PATCH v4 5/6] net/iavf: add xstats support for inline IPsec crypto
  2021-10-01  9:51 ` [dpdk-dev] [PATCH v4 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (3 preceding siblings ...)
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-01  9:51   ` Radu Nicolau
  2021-10-04  2:01     ` Wu, Jingjing
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
  5 siblings, 1 reply; 128+ messages in thread
From: Radu Nicolau @ 2021-10-01  9:51 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add per-queue counters for maintaining statistics for inline IPsec
crypto offload. These can be retrieved through
rte_security_session_stats_get(), with more detailed error counters
available through the rte_ethdev xstats API.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h        | 21 ++++++++-
 drivers/net/iavf/iavf_ethdev.c | 84 ++++++++++++++++++++++++++++------
 drivers/net/iavf/iavf_rxtx.h   | 12 -----
 3 files changed, 89 insertions(+), 28 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 934ef48278..d5f574b4b3 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -92,6 +92,25 @@ struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
 
+
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_eth_xstats {
+	struct virtchnl_eth_stats eth_stats;
+	struct iavf_ipsec_crypto_stats ips_stats;
+};
+
 /* Structure that defines a VSI, associated with a adapter. */
 struct iavf_vsi {
 	struct iavf_adapter *adapter; /* Backreference to associated adapter */
@@ -101,7 +120,7 @@ struct iavf_vsi {
 	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
 	uint16_t base_vector;
 	uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
-	struct virtchnl_eth_stats eth_stats_offset;
+	struct iavf_eth_xstats eth_stats_offset;
 };
 
 struct rte_flow;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 294be1a022..aad6a28585 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -89,6 +89,7 @@ static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);
 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n);
 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -144,21 +145,37 @@ struct rte_iavf_xstats_name_off {
 	unsigned int offset;
 };
 
+#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
-	{"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
-	{"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
-	{"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
-	{"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
-	{"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
+	{"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
+	{"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
+	{"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
+	{"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
+	{"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
 	{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
 		rx_unknown_protocol)},
-	{"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
-	{"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
-	{"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
-	{"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
-	{"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
-	{"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
+	{"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
+	{"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
+	{"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
+	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
+	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
+	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+
+	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
+	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
+	{"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
+	{"inline_ipsec_crypto_ierrors_sad_lookup",
+			_OFF_OF(ips_stats.ierrors.sad_miss)},
+	{"inline_ipsec_crypto_ierrors_not_processed",
+			_OFF_OF(ips_stats.ierrors.not_processed)},
+	{"inline_ipsec_crypto_ierrors_icv_fail",
+			_OFF_OF(ips_stats.ierrors.icv_check)},
+	{"inline_ipsec_crypto_ierrors_length",
+			_OFF_OF(ips_stats.ierrors.ipsec_length)},
+	{"inline_ipsec_crypto_ierrors_misc",
+			_OFF_OF(ips_stats.ierrors.misc)},
 };
+#undef _OFF_OF
 
 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
 		sizeof(rte_iavf_stats_strings[0]))
@@ -176,7 +193,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.stats_reset                = iavf_dev_stats_reset,
 	.xstats_get                 = iavf_dev_xstats_get,
 	.xstats_get_names           = iavf_dev_xstats_get_names,
-	.xstats_reset               = iavf_dev_stats_reset,
+	.xstats_reset               = iavf_dev_xstats_reset,
 	.promiscuous_enable         = iavf_dev_promiscuous_enable,
 	.promiscuous_disable        = iavf_dev_promiscuous_disable,
 	.allmulticast_enable        = iavf_dev_allmulticast_enable,
@@ -1543,7 +1560,7 @@ iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
 static void
 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
 {
-	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
 
 	iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
 	iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
@@ -1605,7 +1622,18 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev)
 		return ret;
 
 	/* set stats offset base on current values */
-	vsi->eth_stats_offset = *pstats;
+	vsi->eth_stats_offset.eth_stats = *pstats;
+
+	return 0;
+}
+
+static int
+iavf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_dev_stats_reset(dev);
+	memset(&vf->vsi.eth_stats_offset, 0, sizeof(struct iavf_eth_xstats));
 
 	return 0;
 }
@@ -1625,6 +1653,27 @@ static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return IAVF_NB_XSTATS;
 }
 
+static void
+iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
+		struct iavf_ipsec_crypto_stats *ips)
+{
+	uint16_t idx;
+	for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
+		struct iavf_rx_queue *rxq;
+		struct iavf_ipsec_crypto_stats *stats;
+		rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
+		stats = &rxq->stats.ipsec_crypto;
+		ips->icount += stats->icount;
+		ips->ibytes += stats->ibytes;
+		ips->ierrors.count += stats->ierrors.count;
+		ips->ierrors.sad_miss += stats->ierrors.sad_miss;
+		ips->ierrors.not_processed += stats->ierrors.not_processed;
+		ips->ierrors.icv_check += stats->ierrors.icv_check;
+		ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
+		ips->ierrors.misc += stats->ierrors.misc;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1635,6 +1684,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_vsi *vsi = &vf->vsi;
 	struct virtchnl_eth_stats *pstats = NULL;
+	struct iavf_eth_xstats iavf_xtats = {0};
 
 	if (n < IAVF_NB_XSTATS)
 		return IAVF_NB_XSTATS;
@@ -1647,11 +1697,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 		return 0;
 
 	iavf_update_stats(vsi, pstats);
+	iavf_xtats.eth_stats = *pstats;
+
+	if (iavf_ipsec_crypto_supported(adapter))
+		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
-		xstats[i].value = *(uint64_t *)(((char *)pstats) +
+		xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
 			rte_iavf_stats_strings[i].offset);
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index e009387aff..18bf8f4921 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -165,18 +165,6 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
-struct iavf_ipsec_crypto_stats {
-	uint64_t icount;
-	uint64_t ibytes;
-	struct {
-		uint64_t count;
-		uint64_t sad_miss;
-		uint64_t not_processed;
-		uint64_t icv_check;
-		uint64_t ipsec_length;
-		uint64_t misc;
-	} ierrors;
-};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread
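
As a usage note, the new counters appear in the standard ethdev xstats list
under the inline_ipsec_crypto_ prefix registered in rte_iavf_stats_strings
above. A minimal sketch of dumping them follows (hypothetical helper, not
part of the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
print_inline_ipsec_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *xstats = NULL;
	int nb, i;

	/* first call with NULL/0 returns the number of xstats */
	nb = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (nb <= 0)
		return;

	names = calloc(nb, sizeof(*names));
	xstats = calloc(nb, sizeof(*xstats));
	if (names == NULL || xstats == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, nb) != nb ||
			rte_eth_xstats_get(port_id, xstats, nb) != nb)
		goto out;

	/* names and values share the same index */
	for (i = 0; i < nb; i++)
		if (strncmp(names[i].name, "inline_ipsec_crypto_",
				strlen("inline_ipsec_crypto_")) == 0)
			printf("%s: %" PRIu64 "\n",
				names[i].name, xstats[i].value);
out:
	free(names);
	free(xstats);
}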

* [dpdk-dev] [PATCH v4 6/6] net/iavf: add watchdog for VFLR
  2021-10-01  9:51 ` [dpdk-dev] [PATCH v4 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (4 preceding siblings ...)
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-10-01  9:51   ` Radu Nicolau
  2021-10-04  2:15     ` Wu, Jingjing
  5 siblings, 1 reply; 128+ messages in thread
From: Radu Nicolau @ 2021-10-01  9:51 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add a watchdog to the iAVF PMD which supports monitoring the VFLR
register. If the device is not already in reset and a VF reset in
progress is detected, notify the user through a callback and put the
device into the reset state. If the device is already in reset, poll
for completion of the reset.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h        |  6 +++
 drivers/net/iavf/iavf_ethdev.c | 97 ++++++++++++++++++++++++++++++++++
 2 files changed, 103 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index d5f574b4b3..4481d2e134 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -212,6 +212,12 @@ struct iavf_info {
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
+	struct {
+		uint8_t enabled:1;
+		uint64_t period_us;
+	} watchdog;
+	/** iAVF watchdog configuration */
+
 	/* Event from pf */
 	bool dev_closed;
 	bool link_up;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index aad6a28585..d02aa9c1c5 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -24,6 +24,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_alarm.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
@@ -239,6 +240,94 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+
+static int
+iavf_vfr_inprogress(struct iavf_hw *hw)
+{
+	int inprogress = 0;
+
+	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
+		VIRTCHNL_VFR_INPROGRESS)
+		inprogress = 1;
+
+	if (inprogress)
+		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
+
+	return inprogress;
+}
+
+static void
+iavf_dev_watchdog(void *cb_arg)
+{
+	struct iavf_adapter *adapter = cb_arg;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	int vfr_inprogress = 0, rc = 0;
+
+	/* check if watchdog has been disabled since last call */
+	if (!adapter->vf.watchdog.enabled)
+		return;
+
+	/* If in reset then poll vfr_inprogress register for completion */
+	if (adapter->vf.vf_reset) {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (!vfr_inprogress) {
+			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+				adapter->eth_dev->data->name);
+			adapter->vf.vf_reset = false;
+		}
+	/* If not in reset then poll vfr_inprogress register for VFLR event */
+	} else {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (vfr_inprogress) {
+			PMD_DRV_LOG(INFO,
+				"VF \"%s\" reset event has been detected by watchdog",
+				adapter->eth_dev->data->name);
+
+			/* enter reset state with VFLR event */
+			adapter->vf.vf_reset = true;
+
+			rte_eth_dev_callback_process(adapter->eth_dev,
+				RTE_ETH_EVENT_INTR_RESET, NULL);
+		}
+	}
+
+	/* re-alarm watchdog */
+	rc = rte_eal_alarm_set(adapter->vf.watchdog.period_us,
+			&iavf_dev_watchdog, cb_arg);
+
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed \"%s\" to reset device watchdog alarm",
+			adapter->eth_dev->data->name);
+}
+
+static void
+iavf_dev_watchdog_enable(struct iavf_adapter *adapter, uint64_t period_us)
+{
+	int rc;
+
+	PMD_DRV_LOG(INFO, "Enabling device watchdog");
+
+	adapter->vf.watchdog.enabled = 1;
+	adapter->vf.watchdog.period_us = period_us;
+
+	rc = rte_eal_alarm_set(adapter->vf.watchdog.period_us,
+			&iavf_dev_watchdog, (void *)adapter);
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed to enabled device watchdog");
+}
+
+static void
+iavf_dev_watchdog_disable(struct iavf_adapter *adapter)
+{
+	PMD_DRV_LOG(INFO, "Disabling device watchdog");
+
+	adapter->vf.watchdog.enabled = 0;
+	adapter->vf.watchdog.period_us = 0;
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
@@ -2448,6 +2537,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	iavf_default_rss_disable(adapter);
 
+
+	/* Start device watchdog, set polling period to 500us */
+	iavf_dev_watchdog_enable(adapter, 500);
+
+
 	return 0;
 
 flow_init_err:
@@ -2527,6 +2621,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
 		vf->vf_reset = false;
 
+	/* disable watchdog */
+	iavf_dev_watchdog_disable(adapter);
+
 	return ret;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread
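
For completeness, a sketch of how an application can consume the
RTE_ETH_EVENT_INTR_RESET notification raised by this watchdog; the handler
name and recovery policy are illustrative and not part of the patch:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* Illustrative handler: the application decides how to recover, e.g.
 * stop and reconfigure the port from a service thread rather than from
 * the callback context itself.
 */
static int
vf_reset_event_cb(uint16_t port_id, enum rte_eth_event_type type,
		void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (type == RTE_ETH_EVENT_INTR_RESET)
		printf("port %u: VF reset detected, scheduling recovery\n",
			port_id);

	return 0;
}

static int
register_vf_reset_handler(uint16_t port_id)
{
	return rte_eth_dev_callback_register(port_id,
			RTE_ETH_EVENT_INTR_RESET, vf_reset_event_cb, NULL);
}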

* Re: [dpdk-dev] [PATCH v4 2/6] net/iavf: rework tx path
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 2/6] net/iavf: rework tx path Radu Nicolau
@ 2021-10-04  1:24     ` Wu, Jingjing
  0 siblings, 0 replies; 128+ messages in thread
From: Wu, Jingjing @ 2021-10-04  1:24 UTC (permalink / raw)
  To: Nicolau, Radu, Xing, Beilei, Richardson, Bruce, Ananyev, Konstantin
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Zhang, Qi Z



> -----Original Message-----
> From: Nicolau, Radu <radu.nicolau@intel.com>
> Sent: Friday, October 1, 2021 5:51 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Richardson,
> Bruce <bruce.richardson@intel.com>; Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org; Doherty, Declan <declan.doherty@intel.com>; Sinha, Abhijit
> <abhijit.sinha@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Nicolau, Radu
> <radu.nicolau@intel.com>
> Subject: [PATCH v4 2/6] net/iavf: rework tx path
> 
> Rework the TX path and TX descriptor usage in order to
> allow for better use of oflload flags and to facilitate enabling of
> inline crypto offload feature.
> 
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
> ---
>  drivers/net/iavf/iavf_rxtx.c         | 536 +++++++++++++++------------
>  drivers/net/iavf/iavf_rxtx.h         |   9 +-
>  drivers/net/iavf/iavf_rxtx_vec_sse.c |  10 +-
>  3 files changed, 319 insertions(+), 236 deletions(-)

Acked-by: Jingjing Wu <jingjing.wu@intel.com>

^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v4 3/6] net/iavf: add support for asynchronous virt channel messages
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
@ 2021-10-04  1:34     ` Wu, Jingjing
  0 siblings, 0 replies; 128+ messages in thread
From: Wu, Jingjing @ 2021-10-04  1:34 UTC (permalink / raw)
  To: Nicolau, Radu, Xing, Beilei
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Zhang, Qi Z, Richardson,
	Bruce, Ananyev, Konstantin



> -----Original Message-----
> From: Nicolau, Radu <radu.nicolau@intel.com>
> Sent: Friday, October 1, 2021 5:51 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Doherty, Declan <declan.doherty@intel.com>; Sinha, Abhijit
> <abhijit.sinha@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; Ananyev, Konstantin <konstantin.ananyev@intel.com>;
> Nicolau, Radu <radu.nicolau@intel.com>
> Subject: [PATCH v4 3/6] net/iavf: add support for asynchronous virt channel messages
> 
> Add support for asynchronous virtual channel messages, specifically for
> inline IPsec messages.
> 
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
> ---
>  drivers/net/iavf/iavf.h       |  16 ++++
>  drivers/net/iavf/iavf_vchnl.c | 137 +++++++++++++++++++++-------------
>  2 files changed, 101 insertions(+), 52 deletions(-)

Acked-by: Jingjing Wu <jingjing.wu@intel.com>

^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v4 4/6] net/iavf: add iAVF IPsec inline crypto support
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-04  1:50     ` Wu, Jingjing
  0 siblings, 0 replies; 128+ messages in thread
From: Wu, Jingjing @ 2021-10-04  1:50 UTC (permalink / raw)
  To: Nicolau, Radu, Xing, Beilei, Ray Kinsella
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Zhang, Qi Z, Richardson,
	Bruce, Ananyev, Konstantin



> -----Original Message-----
> From: Nicolau, Radu <radu.nicolau@intel.com>
> Sent: Friday, October 1, 2021 5:51 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Ray Kinsella
> <mdr@ashroe.eu>
> Cc: dev@dpdk.org; Doherty, Declan <declan.doherty@intel.com>; Sinha, Abhijit
> <abhijit.sinha@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; Ananyev, Konstantin <konstantin.ananyev@intel.com>;
> Nicolau, Radu <radu.nicolau@intel.com>
> Subject: [PATCH v4 4/6] net/iavf: add iAVF IPsec inline crypto support
> 
> Add support for inline crypto for IPsec, for ESP transport and
> tunnel over IPv4 and IPv6, as well as supporting the offload for
> ESP over UDP, and inconjunction with TSO for UDP and TCP flows.
> Implement support for rte_security packet metadata
> 
> Add definition for IPsec descriptors, extend support for offload
> in data and context descriptor to support
> 
> Add support to virtual channel mailbox for IPsec Crypto request
> operations. IPsec Crypto requests receive an initial acknowledgement
> from phsyical function driver of receipt of request and then an
> asynchronous response with success/failure of request including any
> response data.
> 
> Add enhanced descriptor debugging
> 
> Refactor of scalar tx burst function to support integration of offload
> 
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>

Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>

^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v4 5/6] net/iavf: add xstats support for inline IPsec crypto
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-10-04  2:01     ` Wu, Jingjing
  0 siblings, 0 replies; 128+ messages in thread
From: Wu, Jingjing @ 2021-10-04  2:01 UTC (permalink / raw)
  To: Nicolau, Radu, Xing, Beilei
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Zhang, Qi Z, Richardson,
	Bruce, Ananyev, Konstantin



> -----Original Message-----
> From: Nicolau, Radu <radu.nicolau@intel.com>
> Sent: Friday, October 1, 2021 5:51 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Doherty, Declan <declan.doherty@intel.com>; Sinha, Abhijit
> <abhijit.sinha@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; Ananyev, Konstantin <konstantin.ananyev@intel.com>;
> Nicolau, Radu <radu.nicolau@intel.com>
> Subject: [PATCH v4 5/6] net/iavf: add xstats support for inline IPsec crypto
> 
> Add per queue counters for maintaining statistics for inline IPsec
> crypto offload, which can be retrieved through the
> rte_security_session_stats_get() with more detailed errors through the
> rte_ethdev xstats.
> 
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>

Acked-by: Jingjing Wu <jingjing.wu@intel.com>

^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v4 6/6] net/iavf: add watchdog for VFLR
  2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
@ 2021-10-04  2:15     ` Wu, Jingjing
  2021-10-04 11:18       ` Nicolau, Radu
  0 siblings, 1 reply; 128+ messages in thread
From: Wu, Jingjing @ 2021-10-04  2:15 UTC (permalink / raw)
  To: Nicolau, Radu, Xing, Beilei
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Zhang, Qi Z, Richardson,
	Bruce, Ananyev, Konstantin



> -----Original Message-----
> From: Nicolau, Radu <radu.nicolau@intel.com>
> Sent: Friday, October 1, 2021 5:52 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Doherty, Declan <declan.doherty@intel.com>; Sinha, Abhijit
> <abhijit.sinha@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; Ananyev, Konstantin <konstantin.ananyev@intel.com>;
> Nicolau, Radu <radu.nicolau@intel.com>
> Subject: [PATCH v4 6/6] net/iavf: add watchdog for VFLR
> 
> Add watchdog to iAVF PMD which support monitoring the VFLR register. If
> the device is not already in reset then if a VF reset in progress is
> detected then notfiy user through callback and set into reset state.
> If the device is already in reset then poll for completion of reset.
> 
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
> ---
>  drivers/net/iavf/iavf.h        |  6 +++
>  drivers/net/iavf/iavf_ethdev.c | 97 ++++++++++++++++++++++++++++++++++
>  2 files changed, 103 insertions(+)
> 
> diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
> index d5f574b4b3..4481d2e134 100644
> --- a/drivers/net/iavf/iavf.h
> +++ b/drivers/net/iavf/iavf.h
> @@ -212,6 +212,12 @@ struct iavf_info {
>  	int cmd_retval; /* return value of the cmd response from PF */
>  	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
> 
> +	struct {
> +		uint8_t enabled:1;
> +		uint64_t period_us;
> +	} watchdog;
> +	/** iAVF watchdog configuration */
> +
>  	/* Event from pf */
>  	bool dev_closed;
>  	bool link_up;
> diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
> index aad6a28585..d02aa9c1c5 100644
> --- a/drivers/net/iavf/iavf_ethdev.c
> +++ b/drivers/net/iavf/iavf_ethdev.c
> @@ -24,6 +24,7 @@
>  #include <rte_malloc.h>
>  #include <rte_memzone.h>
>  #include <rte_dev.h>
> +#include <rte_alarm.h>
> 
>  #include "iavf.h"
>  #include "iavf_rxtx.h"
> @@ -239,6 +240,94 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
>  	return 0;
>  }
> 
> +
> +static int
> +iavf_vfr_inprogress(struct iavf_hw *hw)
> +{
> +	int inprogress = 0;
> +
> +	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
> +		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
> +		VIRTCHNL_VFR_INPROGRESS)
> +		inprogress = 1;
> +
> +	if (inprogress)
> +		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
> +
> +	return inprogress;
> +}
> +
> +static void
> +iavf_dev_watchdog(void *cb_arg)
> +{
> +	struct iavf_adapter *adapter = cb_arg;
> +	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
> +	int vfr_inprogress = 0, rc = 0;
> +
> +	/* check if watchdog has been disabled since last call */
> +	if (!adapter->vf.watchdog.enabled)
> +		return;
> +
> +	/* If in reset then poll vfr_inprogress register for completion */
> +	if (adapter->vf.vf_reset) {
> +		vfr_inprogress = iavf_vfr_inprogress(hw);
> +
> +		if (!vfr_inprogress) {
> +			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
> +				adapter->eth_dev->data->name);
> +			adapter->vf.vf_reset = false;
> +		}
> +	/* If not in reset then poll vfr_inprogress register for VFLR event */
> +	} else {
> +		vfr_inprogress = iavf_vfr_inprogress(hw);
> +
> +		if (vfr_inprogress) {
> +			PMD_DRV_LOG(INFO,
> +				"VF \"%s\" reset event has been detected by watchdog",
> +				adapter->eth_dev->data->name);
> +
> +			/* enter reset state with VFLR event */
> +			adapter->vf.vf_reset = true;
> +
> +			rte_eth_dev_callback_process(adapter->eth_dev,
> +				RTE_ETH_EVENT_INTR_RESET, NULL);
> +		}
> +	}
> +
> +	/* re-alarm watchdog */
> +	rc = rte_eal_alarm_set(adapter->vf.watchdog.period_us,
> +			&iavf_dev_watchdog, cb_arg);
> +
> +	if (rc)
> +		PMD_DRV_LOG(ERR, "Failed \"%s\" to reset device watchdog alarm",
> +			adapter->eth_dev->data->name);
> +}
> +
> +static void
> +iavf_dev_watchdog_enable(struct iavf_adapter *adapter, uint64_t period_us)
> +{
> +	int rc;
> +
> +	PMD_DRV_LOG(INFO, "Enabling device watchdog");
> +
> +	adapter->vf.watchdog.enabled = 1;
> +	adapter->vf.watchdog.period_us = period_us;
> +
> +	rc = rte_eal_alarm_set(adapter->vf.watchdog.period_us,
> +			&iavf_dev_watchdog, (void *)adapter);
> +	if (rc)
> +		PMD_DRV_LOG(ERR, "Failed to enabled device watchdog");
> +}
> +
> +static void
> +iavf_dev_watchdog_disable(struct iavf_adapter *adapter)
> +{
> +	PMD_DRV_LOG(INFO, "Disabling device watchdog");
> +
> +	adapter->vf.watchdog.enabled = 0;
> +	adapter->vf.watchdog.period_us = 0;
> +}
> +
>  static int
>  iavf_set_mc_addr_list(struct rte_eth_dev *dev,
>  			struct rte_ether_addr *mc_addrs,
> @@ -2448,6 +2537,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
> 
>  	iavf_default_rss_disable(adapter);
> 
> +
> +	/* Start device watchdog, set polling period to 500us */
> +	iavf_dev_watchdog_enable(adapter, 500);
> +

Besides checking VFGEN_RSTAT, there is already a process to handle VIRTCHNL_OP_EVENT from the PF. What is this change for? Is there any scenario that VIRTCHNL_OP_EVENT doesn't cover?
And how was the 500us period determined?


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v4 6/6] net/iavf: add watchdog for VFLR
  2021-10-04  2:15     ` Wu, Jingjing
@ 2021-10-04 11:18       ` Nicolau, Radu
  2021-10-04 14:21         ` Nicolau, Radu
  2021-10-08  6:19         ` Wu, Jingjing
  0 siblings, 2 replies; 128+ messages in thread
From: Nicolau, Radu @ 2021-10-04 11:18 UTC (permalink / raw)
  To: Wu, Jingjing, Xing, Beilei
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Zhang, Qi Z, Richardson,
	Bruce, Ananyev, Konstantin


On 10/4/2021 3:15 AM, Wu, Jingjing wrote:
>
>> -----Original Message-----
>> From: Nicolau, Radu <radu.nicolau@intel.com>
>> Sent: Friday, October 1, 2021 5:52 PM
>> To: Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>
>> Cc: dev@dpdk.org; Doherty, Declan <declan.doherty@intel.com>; Sinha, Abhijit
>> <abhijit.sinha@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Richardson, Bruce
>> <bruce.richardson@intel.com>; Ananyev, Konstantin <konstantin.ananyev@intel.com>;
>> Nicolau, Radu <radu.nicolau@intel.com>
>> Subject: [PATCH v4 6/6] net/iavf: add watchdog for VFLR
>>
>> Add watchdog to iAVF PMD which support monitoring the VFLR register. If
>> the device is not already in reset then if a VF reset in progress is
>> detected then notfiy user through callback and set into reset state.
>> If the device is already in reset then poll for completion of reset.
>>
>> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
>> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
>> ---
>>   drivers/net/iavf/iavf.h        |  6 +++
>>   drivers/net/iavf/iavf_ethdev.c | 97 ++++++++++++++++++++++++++++++++++
>>   2 files changed, 103 insertions(+)
>>
>> diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
>> index d5f574b4b3..4481d2e134 100644
>> --- a/drivers/net/iavf/iavf.h
>> +++ b/drivers/net/iavf/iavf.h
>> @@ -212,6 +212,12 @@ struct iavf_info {
>>   	int cmd_retval; /* return value of the cmd response from PF */
>>   	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
>>
>> +	struct {
>> +		uint8_t enabled:1;
>> +		uint64_t period_us;
>> +	} watchdog;
>> +	/** iAVF watchdog configuration */
>> +
>>   	/* Event from pf */
>>   	bool dev_closed;
>>   	bool link_up;
>> diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
>> index aad6a28585..d02aa9c1c5 100644
>> --- a/drivers/net/iavf/iavf_ethdev.c
>> +++ b/drivers/net/iavf/iavf_ethdev.c
>> @@ -24,6 +24,7 @@
>>   #include <rte_malloc.h>
>>   #include <rte_memzone.h>
>>   #include <rte_dev.h>
>> +#include <rte_alarm.h>
>>
>>   #include "iavf.h"
>>   #include "iavf_rxtx.h"
>> @@ -239,6 +240,94 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
>>   	return 0;
>>   }
>>
>> +
>> +static int
>> +iavf_vfr_inprogress(struct iavf_hw *hw)
>> +{
>> +	int inprogress = 0;
>> +
>> +	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
>> +		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
>> +		VIRTCHNL_VFR_INPROGRESS)
>> +		inprogress = 1;
>> +
>> +	if (inprogress)
>> +		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
>> +
>> +	return inprogress;
>> +}
>> +
>> +static void
>> +iavf_dev_watchdog(void *cb_arg)
>> +{
>> +	struct iavf_adapter *adapter = cb_arg;
>> +	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
>> +	int vfr_inprogress = 0, rc = 0;
>> +
>> +	/* check if watchdog has been disabled since last call */
>> +	if (!adapter->vf.watchdog.enabled)
>> +		return;
>> +
>> +	/* If in reset then poll vfr_inprogress register for completion */
>> +	if (adapter->vf.vf_reset) {
>> +		vfr_inprogress = iavf_vfr_inprogress(hw);
>> +
>> +		if (!vfr_inprogress) {
>> +			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
>> +				adapter->eth_dev->data->name);
>> +			adapter->vf.vf_reset = false;
>> +		}
>> +	/* If not in reset then poll vfr_inprogress register for VFLR event */
>> +	} else {
>> +		vfr_inprogress = iavf_vfr_inprogress(hw);
>> +
>> +		if (vfr_inprogress) {
>> +			PMD_DRV_LOG(INFO,
>> +				"VF \"%s\" reset event has been detected by watchdog",
>> +				adapter->eth_dev->data->name);
>> +
>> +			/* enter reset state with VFLR event */
>> +			adapter->vf.vf_reset = true;
>> +
>> +			rte_eth_dev_callback_process(adapter->eth_dev,
>> +				RTE_ETH_EVENT_INTR_RESET, NULL);
>> +		}
>> +	}
>> +
>> +	/* re-alarm watchdog */
>> +	rc = rte_eal_alarm_set(adapter->vf.watchdog.period_us,
>> +			&iavf_dev_watchdog, cb_arg);
>> +
>> +	if (rc)
>> +		PMD_DRV_LOG(ERR, "Failed \"%s\" to reset device watchdog alarm",
>> +			adapter->eth_dev->data->name);
>> +}
>> +
>> +static void
>> +iavf_dev_watchdog_enable(struct iavf_adapter *adapter, uint64_t period_us)
>> +{
>> +	int rc;
>> +
>> +	PMD_DRV_LOG(INFO, "Enabling device watchdog");
>> +
>> +	adapter->vf.watchdog.enabled = 1;
>> +	adapter->vf.watchdog.period_us = period_us;
>> +
>> +	rc = rte_eal_alarm_set(adapter->vf.watchdog.period_us,
>> +			&iavf_dev_watchdog, (void *)adapter);
>> +	if (rc)
>> +		PMD_DRV_LOG(ERR, "Failed to enabled device watchdog");
>> +}
>> +
>> +static void
>> +iavf_dev_watchdog_disable(struct iavf_adapter *adapter)
>> +{
>> +	PMD_DRV_LOG(INFO, "Disabling device watchdog");
>> +
>> +	adapter->vf.watchdog.enabled = 0;
>> +	adapter->vf.watchdog.period_us = 0;
>> +}
>> +
>>   static int
>>   iavf_set_mc_addr_list(struct rte_eth_dev *dev,
>>   			struct rte_ether_addr *mc_addrs,
>> @@ -2448,6 +2537,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
>>
>>   	iavf_default_rss_disable(adapter);
>>
>> +
>> +	/* Start device watchdog, set polling period to 500us */
>> +	iavf_dev_watchdog_enable(adapter, 500);
>> +
> Besides checking VFGEN_RSTAT, there is a process to handle VIRTCHNL_OP_EVENT  from PF. What is the change for? Any scenario which VIRTCHNL_OP_EVENT  doesn't cover?
> And how is the 500us been determined?

Hi Jingjing, thanks for reviewing. I think this can be handled with the
VIRTCHNL_OP_EVENT, with no need for a watchdog alarm; I will rework the
patch.



^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v4 6/6] net/iavf: add watchdog for VFLR
  2021-10-04 11:18       ` Nicolau, Radu
@ 2021-10-04 14:21         ` Nicolau, Radu
  2021-10-08  6:19         ` Wu, Jingjing
  1 sibling, 0 replies; 128+ messages in thread
From: Nicolau, Radu @ 2021-10-04 14:21 UTC (permalink / raw)
  To: Wu, Jingjing, Xing, Beilei
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Zhang, Qi Z, Richardson,
	Bruce, Ananyev, Konstantin


On 10/4/2021 12:18 PM, Nicolau, Radu wrote:
>
> On 10/4/2021 3:15 AM, Wu, Jingjing wrote:
>>
>>> -----Original Message-----
>>> From: Nicolau, Radu <radu.nicolau@intel.com>
>>> Sent: Friday, October 1, 2021 5:52 PM
>>> To: Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei 
>>> <beilei.xing@intel.com>
>>> Cc: dev@dpdk.org; Doherty, Declan <declan.doherty@intel.com>; Sinha, 
>>> Abhijit
>>> <abhijit.sinha@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; 
>>> Richardson, Bruce
>>> <bruce.richardson@intel.com>; Ananyev, Konstantin 
>>> <konstantin.ananyev@intel.com>;
>>> Nicolau, Radu <radu.nicolau@intel.com>
>>> Subject: [PATCH v4 6/6] net/iavf: add watchdog for VFLR
>>>
>>> Add watchdog to iAVF PMD which support monitoring the VFLR register. If
>>> the device is not already in reset then if a VF reset in progress is
>>> detected then notfiy user through callback and set into reset state.
>>> If the device is already in reset then poll for completion of reset.
>>>
>>> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
>>> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
>>> ---
>>>   drivers/net/iavf/iavf.h        |  6 +++
>>>   drivers/net/iavf/iavf_ethdev.c | 97 
>>> ++++++++++++++++++++++++++++++++++
>>>   2 files changed, 103 insertions(+)
>>>
>>> ...
>>>
>> Besides checking VFGEN_RSTAT, there is a process to handle 
>> VIRTCHNL_OP_EVENT  from PF. What is the change for? Any scenario 
>> which VIRTCHNL_OP_EVENT  doesn't cover?
>> And how is the 500us been determined?
>
> Hi Jingjing, thanks for reviewing, I think this can be handled with 
> the VIRTCHNL_OP_EVENT  with no need for a watchdog alarm, I will 
> rework the patch.
>
Hi Jingjing, I went over this with Declan. The reason the watchdog was
added is that we can actually have a hardware-initiated reset that may
not trigger an event; the kernel driver also implements a similar
mechanism. The 500us does indeed seem excessive, so I will update the
patch to use a configurable value with a default of 5ms, as the kernel
driver does.
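
For illustration, a minimal sketch of a configurable watchdog period on
top of the rte_eal_alarm_set() based watchdog quoted above; the
"watchdog_period" devargs option name, the helper names and the parsing
below are assumptions for discussion only, not the reworked patch:

#include <errno.h>
#include <stdlib.h>
#include <rte_kvargs.h>
#include <rte_devargs.h>

/* Hypothetical option name and default period (in microseconds). */
#define IAVF_WATCHDOG_PERIOD_ARG	"watchdog_period"
#define IAVF_WATCHDOG_PERIOD_DEFAULT	5000	/* 5ms, as the kernel driver */

static int
iavf_parse_watchdog_period(const char *key __rte_unused,
			   const char *value, void *args)
{
	uint64_t *period_us = args;
	char *end = NULL;
	unsigned long val = strtoul(value, &end, 10);

	if (end == NULL || *end != '\0')
		return -EINVAL;

	*period_us = val;
	return 0;
}

/* Called from probe: pick up the option if present, otherwise use the
 * default, then arm the existing periodic alarm with that period.
 * A period of 0 disables the watchdog in this sketch.
 */
static void
iavf_watchdog_setup(struct iavf_adapter *adapter, struct rte_devargs *devargs)
{
	uint64_t period_us = IAVF_WATCHDOG_PERIOD_DEFAULT;
	struct rte_kvargs *kvlist;

	if (devargs != NULL) {
		kvlist = rte_kvargs_parse(devargs->args, NULL);
		if (kvlist != NULL) {
			rte_kvargs_process(kvlist, IAVF_WATCHDOG_PERIOD_ARG,
					   iavf_parse_watchdog_period,
					   &period_us);
			rte_kvargs_free(kvlist);
		}
	}

	if (period_us != 0)
		iavf_dev_watchdog_enable(adapter, period_us);
}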


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v5 0/6] iavf: add iAVF IPsec inline crypto support
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (6 preceding siblings ...)
  2021-10-01  9:51 ` [dpdk-dev] [PATCH v4 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-06  9:28 ` Radu Nicolau
  2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 1/6] common/iavf: " Radu Nicolau
                     ` (5 more replies)
  2021-10-08 10:19 ` [dpdk-dev] [PATCH v6 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (8 subsequent siblings)
  16 siblings, 6 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-06  9:28 UTC (permalink / raw)
  Cc: dev, declan.doherty, abhijit.sinha, jingjing.wu, qi.z.zhang,
	beilei.xing, bruce.richardson, konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload for
ESP over UDP, and in conjunction with TSO for UDP and TCP flows.
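
For context, a rough sketch of how an application could consume this
offload through the generic rte_security API; it assumes the inline
crypto session interface of this release (the session-create prototype
differs between releases), and the SPI, tunnel parameters and helper
name below are illustrative only, not part of this series:

#include <rte_ethdev.h>
#include <rte_security.h>
#include <rte_crypto_sym.h>

/* Sketch: create an inline-crypto session for ESP tunnel over IPv4 on
 * a given port. Tunnel endpoint addresses, salt, SA options and the
 * AEAD transform contents are omitted for brevity.
 */
static struct rte_security_session *
create_esp_tunnel_session(uint16_t port_id, struct rte_mempool *sess_mp,
			  struct rte_crypto_sym_xform *aead_xform)
{
	struct rte_security_ctx *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1000,	/* example SPI */
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.tunnel = {
				.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
			},
		},
		.crypto_xform = aead_xform,
	};

	if (sec_ctx == NULL)
		return NULL;

	/* Packets later marked with PKT_TX_SEC_OFFLOAD and bound to this
	 * session via rte_security_set_pkt_metadata() are encrypted
	 * inline by the NIC on transmit.
	 */
	return rte_security_session_create(sec_ctx, &conf, sess_mp, sess_mp);
}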

Radu Nicolau (6):
  common/iavf: add iAVF IPsec inline crypto support
  net/iavf: rework tx path
  net/iavf: add support for asynchronous virt channel messages
  net/iavf: add iAVF IPsec inline crypto support
  net/iavf: add xstats support for inline IPsec crypto
  net/iavf: add watchdog for VFLR

 drivers/common/iavf/iavf_type.h               |  215 +-
 drivers/common/iavf/virtchnl.h                |   17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
 drivers/net/iavf/iavf.h                       |   52 +-
 drivers/net/iavf/iavf_ethdev.c                |  218 +-
 drivers/net/iavf/iavf_generic_flow.c          |   16 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1904 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |   96 +
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  709 ++++--
 drivers/net/iavf/iavf_rxtx.h                  |   91 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  166 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 17 files changed, 4118 insertions(+), 321 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

-- 
v2: small updates and fixes in the flow-related section
v3: split the huge patch and address feedback
v4: small changes due to dependency changes
v5: updated the watchdog patch



2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v5 1/6] common/iavf: add iAVF IPsec inline crypto support
  2021-10-06  9:28 ` [dpdk-dev] [PATCH v5 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-06  9:28   ` Radu Nicolau
  2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 2/6] net/iavf: rework tx path Radu Nicolau
                     ` (4 subsequent siblings)
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-06  9:28 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/common/iavf/iavf_type.h             | 215 +++++++-
 drivers/common/iavf/virtchnl.h              |  17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h | 553 ++++++++++++++++++++
 3 files changed, 775 insertions(+), 10 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 73dfb47e70..1f8f8ae5fd 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -709,11 +709,29 @@ enum iavf_rx_prog_status_desc_error_bits {
 #define IAVF_FOUR_BIT_MASK	0xF
 #define IAVF_EIGHTEEN_BIT_MASK	0x3FFFF
 
-/* TX Descriptor */
+/* TX Data Descriptor */
 struct iavf_tx_desc {
-	__le64 buffer_addr; /* Address of descriptor's data buf */
-	__le64 cmd_type_offset_bsz;
-};
+	union {
+		struct {
+			__le64 buffer_addr; /* Addr of descriptor's data buf */
+			__le64 cmd_type_offset_bsz;
+		};
+		struct {
+			__le64 qw0; /**< data buffer address */
+			__le64 qw1; /**< dtyp, cmd, offset, buf_sz and l2tag1 */
+		};
+		struct {
+			__le64 buffer_addr;	/**< Data buffer address */
+			__le64 type:4;		/**< Descriptor type */
+			__le64 cmd:12;		/**< Command field */
+			__le64 offset_l2len:7;	/**< L2 header length */
+			__le64 offset_l3len:7;	/**< L3 header length */
+			__le64 offset_l4len:4;	/**< L4 header length */
+			__le64 buffer_sz:14;	/**< Data buffer size */
+			__le64 l2tag1:16;	/**< L2 Tag 1 value */
+		} debug __rte_packed;
+	};
+} __rte_packed;
 
 #define IAVF_TXD_QW1_DTYPE_SHIFT	0
 #define IAVF_TXD_QW1_DTYPE_MASK		(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
@@ -723,6 +741,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
@@ -734,7 +753,7 @@ enum iavf_tx_desc_dtype_value {
 #define IAVF_TXD_QW1_CMD_SHIFT	4
 #define IAVF_TXD_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_QW1_CMD_SHIFT)
 
-enum iavf_tx_desc_cmd_bits {
+enum iavf_tx_data_desc_cmd_bits {
 	IAVF_TX_DESC_CMD_EOP			= 0x0001,
 	IAVF_TX_DESC_CMD_RS			= 0x0002,
 	IAVF_TX_DESC_CMD_ICRC			= 0x0004,
@@ -778,18 +797,79 @@ enum iavf_tx_desc_length_fields {
 #define IAVF_TXD_QW1_L2TAG1_SHIFT	48
 #define IAVF_TXD_QW1_L2TAG1_MASK	(0xFFFFULL << IAVF_TXD_QW1_L2TAG1_SHIFT)
 
+#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
+#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
+#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
+#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
+	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
+	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT		(48)
+#define IAVF_TXD_DATA_QW1_L2TAG1_MASK		\
+	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
+
 /* Context descriptors */
 struct iavf_tx_context_desc {
+	union {
+		struct {
 	__le32 tunneling_params;
 	__le16 l2tag2;
 	__le16 rsvd;
 	__le64 type_cmd_tso_mss;
 };
-
-#define IAVF_TXD_CTX_QW1_DTYPE_SHIFT	0
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le32 tunneling;
+			__le16 l2tag2;
+			__le16 rsvd0;
+			__le64 type:4;
+			__le64 cmd:7;
+			__le64 ipsec:7;
+			__le64 rsvd1:12;
+			__le64 tlen_tsyn:18;
+			__le64 rsvd2:2;
+			__le64 mss_target_vsi:14;
+		} debug __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_TXD_CTX_QW1_DTYPE_SHIFT	(0)
 #define IAVF_TXD_CTX_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_CTX_QW1_DTYPE_SHIFT)
 
-#define IAVF_TXD_CTX_QW1_CMD_SHIFT	4
+#define IAVF_TXD_CTX_QW1_CMD_SHIFT	(4)
 #define IAVF_TXD_CTX_QW1_CMD_MASK	(0xFFFFUL << IAVF_TXD_CTX_QW1_CMD_SHIFT)
 
 enum iavf_tx_ctx_desc_cmd_bits {
@@ -804,6 +884,63 @@ enum iavf_tx_ctx_desc_cmd_bits {
 	IAVF_TX_CTX_DESC_SWPE		= 0x40
 };
 
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
+	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
+	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT		(30)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK		\
+	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK		\
+	(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT		(50)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK		\
+	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT		(0)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_external_ip_type {
+	IAVF_TX_CTX_DESC_EIPT_NONE,
+	IAVF_TX_CTX_DESC_EIPT_IPV6,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM			(32)
+#define IAVF_TXD_CTX_QW0_L2TAG2_MASK			(0xFFFFUL)
+
 struct iavf_nop_desc {
 	__le64 rsvd;
 	__le64 dtype_cmd;
@@ -911,6 +1048,68 @@ enum iavf_tx_ctx_desc_eipt_offload {
 #define IAVF_TXD_CTX_QW0_L4T_CS_SHIFT	23
 #define IAVF_TXD_CTX_QW0_L4T_CS_MASK	BIT_ULL(IAVF_TXD_CTX_QW0_L4T_CS_SHIFT)
 
+
+struct iavf_tx_ipsec_desc {
+	union {
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le16 l4payload_length;
+			__le32 esn;
+			__le16 trailer_length;
+			u8 type:4;
+			u8 rsv:1;
+			u8 udp:1;
+			u8 ivlen:2;
+			u8 next_header;
+			__le16 ipv6_ext_hdr_length;
+			__le32 said;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
+			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
+
+/* Initialization Vector Length type */
+enum iavf_ipsec_iv_len {
+	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
+	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
+	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
+	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
+};
+
 /* Statistics collected by each port, VSI, VEB, and S-channel */
 struct iavf_eth_stats {
 	u64 rx_bytes;			/* gorc */
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 83f51d889f..5cc326c035 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -38,6 +38,8 @@
  * value in current and future projects
  */
 
+#include "virtchnl_inline_ipsec.h"
+
 /* Error Codes */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
@@ -133,7 +135,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, and 37 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
 	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
 	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
@@ -225,6 +228,8 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 		return "VIRTCHNL_OP_DCF_CMD_DESC";
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
@@ -385,7 +390,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
-	/* BIT(8) is reserved */
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
@@ -2290,6 +2295,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..1e9134501e
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all valid, if certain field is invalid, set 1 for all bits */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* vf record the capability of crypto from the virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF pass virtchnl_ipsec_cap to PF
+ * and PF return capability of ipsec from virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF send this SA configuration to PF using virtchnl;
+ * PF create SA as configuration and PF driver will return
+ * an unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF send configuration of index of SA to PF
+ * PF will update SA according to configuration
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF send configuration of index of SA to PF
+ * PF will destroy SA according to configuration
+ * flag bitmap indicate all SA or just selected SA will
+ * be destroyed
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF send this SA configuration to PF using virtchnl;
+ * PF read SA and will return configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v5 2/6] net/iavf: rework tx path
  2021-10-06  9:28 ` [dpdk-dev] [PATCH v5 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 1/6] common/iavf: " Radu Nicolau
@ 2021-10-06  9:28   ` Radu Nicolau
  2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-06  9:28 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Rework the TX path and TX descriptor usage in order to
allow for better use of offload flags and to facilitate enabling of
the inline crypto offload feature.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
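For reference, a small worked example (not part of this patch) of the
QW1 packing done by the reworked path for an IPv4 + TCP checksum
offload packet, using the masks and shifts defined in patch 1/6; the
l2/l3/l4 lengths (14/20/20) and the helper name are example values only.

/* Hypothetical helper, illustration only: data descriptor QW1 for
 * l2_len = 14, l3_len = 20, l4_len = 20 with IP and TCP checksum
 * offload requested.
 */
static uint64_t
example_data_desc_qw1(void)
{
	uint64_t command = IAVF_TX_DESC_CMD_ICRC |
			   IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM |
			   IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
	uint64_t offset =
		((14 >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT) |   /* MACLEN = 7 words  */
		((20 >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT) |    /* IPLEN  = 5 dwords */
		((20 >> 2) << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT); /* L4LEN  = 5 dwords */

	return IAVF_TX_DESC_DTYPE_DATA |
	       ((command << IAVF_TXD_DATA_QW1_CMD_SHIFT) &
			IAVF_TXD_DATA_QW1_CMD_MASK) |
	       ((offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
			IAVF_TXD_DATA_QW1_OFFSET_MASK);
}
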
 drivers/net/iavf/iavf_rxtx.c         | 536 +++++++++++++++------------
 drivers/net/iavf/iavf_rxtx.h         |   9 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c |  10 +-
 3 files changed, 319 insertions(+), 236 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 6de8ad3fe3..d2cb6d59bc 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1048,27 +1048,31 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 
 static inline void
 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
-			  volatile union iavf_rx_flex_desc *rxdp,
-			  uint8_t rx_flags)
+			  volatile union iavf_rx_flex_desc *rxdp)
 {
-	uint16_t vlan_tci = 0;
-
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
-	    rte_le_to_cpu_64(rxdp->wb.status_error0) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+		(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+		mb->vlan_tci =
+			rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	} else {
+		mb->vlan_tci = 0;
+	}
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
-	    rte_le_to_cpu_16(rxdp->wb.status_error1) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
-#endif
-
-	if (vlan_tci) {
-		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		mb->vlan_tci = vlan_tci;
+	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
+		mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+				PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+		mb->vlan_tci_outer = mb->vlan_tci;
+		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
+	} else {
+		mb->vlan_tci_outer = 0;
 	}
+#endif
 }
 
 /* Translate the rx descriptor status and error fields to pkt flags */
@@ -1388,7 +1392,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->ol_flags = 0;
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1530,7 +1534,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = 0;
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1768,7 +1772,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
-			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
+			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2038,7 +2042,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
 
 	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-	if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
+	if ((txd[desc_to_clean_to].qw1 &
 			rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
 			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
 		PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done "
@@ -2054,7 +2058,7 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
 					last_desc_cleaned);
 
-	txd[desc_to_clean_to].cmd_type_offset_bsz = 0;
+	txd[desc_to_clean_to].qw1 = 0;
 
 	txq->last_desc_cleaned = desc_to_clean_to;
 	txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
@@ -2062,190 +2066,296 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 	return 0;
 }
 
-/* Check if the context descriptor is needed for TX offloading */
+
+
+static inline void
+iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
+{
+	uint64_t cmd = 0;
+
+	/* TSO enabled */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;
+
+	/* Time Sync - Currently not supported */
+
+	/* Outer L2 TAG 2 Insertion - Currently not supported */
+	/* Inner L2 TAG 2 Insertion - Currently not supported */
+
+	*field |= cmd;
+}
+
+static inline void
+iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
+		const struct rte_mbuf *m)
+{
+	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
+	uint64_t eip_len = 0;
+	uint64_t eip_noinc = 0;
+	/* Default - IP_ID is increment in each segment of LSO */
+
+	switch (m->ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6 |
+			PKT_TX_OUTER_IP_CKSUM)) {
+	case PKT_TX_OUTER_IPV4:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV6:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	}
+
+	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
+		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
+		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
+}
+
 static inline uint16_t
-iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
+iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
+	struct rte_mbuf *m)
 {
-	if (flags & PKT_TX_TCP_SEG)
-		return 1;
-	if (flags & PKT_TX_VLAN_PKT &&
-	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		return 1;
-	return 0;
+	uint64_t segmentation_field = 0;
+	uint64_t total_length = 0;
+
+	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		total_length -= m->outer_l3_len;
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+	if (!m->l4_len || !m->tso_segsz)
+		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
+			 m->l4_len, m->tso_segsz);
+	if (m->tso_segsz < 88)
+		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
+			m->tso_segsz, 88);
+#endif
+	segmentation_field =
+		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
+				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
+		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
+				IAVF_TXD_CTX_QW1_MSS_MASK);
+
+	*field |= segmentation_field;
+
+	return total_length;
+}
+
+static inline void
+iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
+	struct rte_mbuf *m, uint16_t *tlen)
+{
+	/* fill descriptor type field */
+	desc->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
+
+	/* fill command field */
+	iavf_fill_ctx_desc_cmd_field(&desc->qw1, m);
+
+	/* fill segmentation field */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc->qw1,
+				m);
+	}
+
+	/* fill tunnelling field */
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field(&desc->qw0, m);
+	else
+		desc->qw0 = 0;
+
+	desc->qw0 = rte_cpu_to_le_64(desc->qw0);
+	desc->qw1 = rte_cpu_to_le_64(desc->qw1);
 }
 
+
 static inline void
-iavf_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union iavf_tx_offload tx_offload)
+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
+		struct rte_mbuf *m)
 {
+	uint64_t command = 0;
+	uint64_t offset = 0;
+	uint64_t l2tag1 = 0;
+
+	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
+
+	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
+
+	/* Descriptor based VLAN insertion */
+	if (m->ol_flags & PKT_TX_VLAN_PKT) {
+		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
+		l2tag1 |= m->vlan_tci;
+	}
+
 	/* Set MACLEN */
-	*td_offset |= (tx_offload.l2_len >> 1) <<
-		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & PKT_TX_IP_CKSUM) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV4) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV6) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	}
-
-	if (ol_flags & PKT_TX_TCP_SEG) {
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
+	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L3 checksum offloading inner */
+	if (m->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV4) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV6) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	}
+
+	if (m->ol_flags & PKT_TX_TCP_SEG) {
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (m->l4_len >> 2) <<
 			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		return;
 	}
 
 	/* Enable L4 checksum offloads */
-	switch (ol_flags & PKT_TX_L4_MASK) {
+	switch (m->ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_TCP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_SCTP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_UDP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	}
+
+	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
+		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
+		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
+		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
+		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
-/* set TSO context descriptor
- * support IP -> L4 and IP -> IP -> L4
- */
-static inline uint64_t
-iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
+static inline void
+iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t value)
 {
-	uint64_t ctx_desc = 0;
-	uint32_t cd_cmd, hdr_len, cd_tso_len;
-
-	if (!tx_offload.l4_len) {
-		PMD_TX_LOG(DEBUG, "L4 length set to 0");
-		return ctx_desc;
+	*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 	}
 
-	hdr_len = tx_offload.l2_len +
-		  tx_offload.l3_len +
-		  tx_offload.l4_len;
+static inline void
+iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
+	struct rte_mbuf *m, uint64_t desc_template,
+	uint16_t tlen, uint16_t ipseclen)
+{
+	uint32_t hdrlen = m->l2_len;
+	uint32_t bufsz = 0;
+
+	/* fill data descriptor qw1 from template */
+	desc->qw1 = desc_template;
+
+	/* set data buffer address */
+	desc->qw0 = rte_mbuf_data_iova(m);
+
+	/* calculate data buffer size less set header lengths */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			hdrlen += m->outer_l3_len;
 
-	cd_cmd = IAVF_TX_CTX_DESC_TSO;
-	cd_tso_len = mbuf->pkt_len - hdr_len;
-	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
-		     ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-		     ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+		if (m->ol_flags & PKT_TX_L4_MASK)
+			hdrlen += m->l3_len + m->l4_len;
+		else
+			hdrlen += m->l3_len;
+
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			hdrlen += ipseclen;
 
-	return ctx_desc;
+		bufsz = hdrlen + tlen;
+	} else {
+		bufsz = m->data_len;
 }
 
-/* Construct the tx flags */
-static inline uint64_t
-iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
-	       uint32_t td_tag)
-{
-	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
-				((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
-				((uint64_t)td_offset <<
-				 IAVF_TXD_QW1_OFFSET_SHIFT) |
-				((uint64_t)size  <<
-				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
-				((uint64_t)td_tag  <<
-				 IAVF_TXD_QW1_L2TAG1_SHIFT));
+	/* set data buffer size */
+	desc->qw1 |= (((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
+
+	desc->qw0 = rte_cpu_to_le_64(desc->qw0);
+	desc->qw1 = rte_cpu_to_le_64(desc->qw1);
 }
 
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	volatile struct iavf_tx_desc *txd;
-	volatile struct iavf_tx_desc *txr;
-	struct iavf_tx_queue *txq;
-	struct iavf_tx_entry *sw_ring;
+	struct iavf_tx_queue *txq = tx_queue;
+	volatile struct iavf_tx_desc *txr = txq->tx_ring;
+	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
-	struct rte_mbuf *tx_pkt;
-	struct rte_mbuf *m_seg;
-	uint16_t tx_id;
-	uint16_t nb_tx;
-	uint32_t td_cmd;
-	uint32_t td_offset;
-	uint32_t td_tag;
-	uint64_t ol_flags;
-	uint16_t nb_used;
-	uint16_t nb_ctx;
-	uint16_t tx_last;
-	uint16_t slen;
-	uint64_t buf_dma_addr;
-	uint16_t cd_l2tag2 = 0;
-	union iavf_tx_offload tx_offload = {0};
-
-	txq = tx_queue;
-	sw_ring = txq->sw_ring;
-	txr = txq->tx_ring;
-	tx_id = txq->tx_tail;
-	txe = &sw_ring[tx_id];
+	struct rte_mbuf *mb, *mb_seg;
+	uint16_t desc_idx, desc_idx_last;
+	uint16_t idx;
+
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_free < txq->free_thresh)
-		(void)iavf_xmit_cleanup(txq);
+		iavf_xmit_cleanup(txq);
+
+	desc_idx = txq->tx_tail;
+	txe = &txe_ring[desc_idx];
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+		iavf_dump_tx_entry_ring(txq);
+		iavf_dump_tx_desc_ring(txq);
+#endif
+
 
-	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-		td_cmd = 0;
-		td_tag = 0;
-		td_offset = 0;
+	for (idx = 0; idx < nb_pkts; idx++) {
+		volatile struct iavf_tx_desc *ddesc;
+		uint16_t nb_desc_ctx;
+		uint16_t nb_desc_data, nb_desc_required;
+		uint16_t tlen = 0, ipseclen = 0;
+		uint64_t ddesc_template = 0;
+		uint64_t ddesc_cmd = 0;
+
+		mb = tx_pkts[idx];
 
-		tx_pkt = *tx_pkts++;
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
-		ol_flags = tx_pkt->ol_flags;
-		tx_offload.l2_len = tx_pkt->l2_len;
-		tx_offload.l3_len = tx_pkt->l3_len;
-		tx_offload.l4_len = tx_pkt->l4_len;
-		tx_offload.tso_segsz = tx_pkt->tso_segsz;
-		/* Calculate the number of context descriptors needed. */
-		nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
+		nb_desc_data = mb->nb_segs;
+		nb_desc_ctx = !!(mb->ol_flags &
+			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
 
-		/* The number of descriptors that must be allocated for
+		/**
+		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
-		 * packet plus 1 context descriptor if needed.
+		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
-		tx_last = (uint16_t)(tx_id + nb_used - 1);
+		nb_desc_required = nb_desc_data + nb_desc_ctx;
+
+		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
-		/* Circular ring */
-		if (tx_last >= txq->nb_tx_desc)
-			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+		/* wrap descriptor ring */
+		if (desc_idx_last >= txq->nb_tx_desc)
+			desc_idx_last =
+				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
 
-		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
-			   " tx_first=%u tx_last=%u",
-			   txq->port_id, txq->queue_id, tx_id, tx_last);
+		PMD_TX_LOG(DEBUG,
+			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
+			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
 
-		if (nb_used > txq->nb_free) {
+		if (nb_desc_required > txq->nb_free) {
 			if (iavf_xmit_cleanup(txq)) {
-				if (nb_tx == 0)
+				if (idx == 0)
 					return 0;
 				goto end_of_tx;
 			}
-			if (unlikely(nb_used > txq->rs_thresh)) {
-				while (nb_used > txq->nb_free) {
+			if (unlikely(nb_desc_required > txq->rs_thresh)) {
+				while (nb_desc_required > txq->nb_free) {
 					if (iavf_xmit_cleanup(txq)) {
-						if (nb_tx == 0)
+						if (idx == 0)
 							return 0;
 						goto end_of_tx;
 					}
@@ -2253,122 +2363,94 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 		}
 
-		/* Descriptor based VLAN insertion */
-		if (ol_flags & PKT_TX_VLAN_PKT &&
-		    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
-			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
-			td_tag = tx_pkt->vlan_tci;
-		}
-
-		/* According to datasheet, the bit2 is reserved and must be
-		 * set to 1.
-		 */
-		td_cmd |= 0x04;
-
-		/* Enable checksum offloading */
-		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
-			iavf_txd_enable_checksum(ol_flags, &td_cmd,
-						&td_offset, tx_offload);
+		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
 
-		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
-			uint64_t cd_type_cmd_tso_mss =
-				IAVF_TX_DESC_DTYPE_CONTEXT;
-			volatile struct iavf_tx_context_desc *ctx_txd =
+		if (nb_desc_ctx) {
+			volatile struct iavf_tx_context_desc *ctx_desc =
 				(volatile struct iavf_tx_context_desc *)
-							&txr[tx_id];
+					&txr[desc_idx];
 
 			/* clear QW0 or the previous writeback value
 			 * may impact next write
 			 */
-			*(volatile uint64_t *)ctx_txd = 0;
+			*(volatile uint64_t *)ctx_desc = 0;
 
-			txn = &sw_ring[txe->next_id];
+			txn = &txe_ring[txe->next_id];
 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
 			if (txe->mbuf) {
 				rte_pktmbuf_free_seg(txe->mbuf);
 				txe->mbuf = NULL;
 			}
 
-			/* TSO enabled */
-			if (ol_flags & PKT_TX_TCP_SEG)
-				cd_type_cmd_tso_mss |=
-					iavf_set_tso_ctx(tx_pkt, tx_offload);
+			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
-			if (ol_flags & PKT_TX_VLAN_PKT &&
-			   txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
-				cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
-					<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-				cd_l2tag2 = tx_pkt->vlan_tci;
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
 			}
 
-			ctx_txd->type_cmd_tso_mss =
-				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
-			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
 
-			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
-		}
 
-		m_seg = tx_pkt;
+		mb_seg = mb;
+
 		do {
-			txd = &txr[tx_id];
-			txn = &sw_ring[txe->next_id];
+			ddesc = (volatile struct iavf_tx_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
 			if (txe->mbuf)
 				rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = m_seg;
-
-			/* Setup TX Descriptor */
-			slen = m_seg->data_len;
-			buf_dma_addr = rte_mbuf_data_iova(m_seg);
-			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
-			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
-								  td_offset,
-								  slen,
-								  td_tag);
-
-			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
+
+			txe->mbuf = mb_seg;
+			iavf_fill_data_desc(ddesc, mb_seg,
+					ddesc_template, tlen, ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->next;
-		} while (m_seg);
+			mb_seg = mb_seg->next;
+		} while (mb_seg);
 
 		/* The last packet data descriptor needs End Of Packet (EOP) */
-		td_cmd |= IAVF_TX_DESC_CMD_EOP;
-		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
-		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
+
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
 
 		if (txq->nb_used >= txq->rs_thresh) {
 			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
 				   "%4u (port=%d queue=%d)",
-				   tx_last, txq->port_id, txq->queue_id);
+				   desc_idx_last, txq->port_id, txq->queue_id);
 
-			td_cmd |= IAVF_TX_DESC_CMD_RS;
+			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
 
 			/* Update txq RS bit counters */
 			txq->nb_used = 0;
 		}
 
-		txd->cmd_type_offset_bsz |=
-			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
-					 IAVF_TXD_QW1_CMD_SHIFT);
-		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+		ddesc->qw1 |= rte_cpu_to_le_64(ddesc_cmd <<
+				IAVF_TXD_DATA_QW1_CMD_SHIFT);
+
+		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
 	}
 
 end_of_tx:
 	rte_wmb();
 
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   txq->port_id, txq->queue_id, tx_id, nb_tx);
+		   txq->port_id, txq->queue_id, desc_idx, idx);
 
-	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
-	txq->tx_tail = tx_id;
+	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
+	txq->tx_tail = desc_idx;
 
-	return nb_tx;
+	return idx;
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
@@ -2869,7 +2951,7 @@ iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
 			desc -= txq->nb_tx_desc;
 	}
 
-	status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+	status = &txq->tx_ring[desc].qw1;
 	mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
 	expect = rte_cpu_to_le_64(
 		 IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index e210b913d6..1bc47614ea 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -555,9 +555,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	const volatile struct iavf_tx_desc *tx_desc = desc;
 	enum iavf_tx_desc_dtype_value type;
 
-	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
-		tx_desc->cmd_type_offset_bsz &
-		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+
+	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(tx_desc->qw1 &
+			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
 	switch (type) {
 	case IAVF_TX_DESC_DTYPE_DATA:
 		name = "Tx_data_desc";
@@ -571,8 +571,7 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	}
 
 	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
-	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
-	       tx_desc->cmd_type_offset_bsz);
+		txq->queue_id, name, tx_id, tx_desc->qw0, tx_desc->qw1);
 }
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index ee1e905525..288c5ca1f1 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -363,10 +363,12 @@ static inline void
 flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
 		     const uint32_t *type_table)
 {
-	const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+	const __m128i ptype_mask = _mm_set_epi16(
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
+
 	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
 	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
 	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v5 3/6] net/iavf: add support for asynchronous virt channel messages
  2021-10-06  9:28 ` [dpdk-dev] [PATCH v5 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 1/6] common/iavf: " Radu Nicolau
  2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 2/6] net/iavf: rework tx path Radu Nicolau
@ 2021-10-06  9:28   ` Radu Nicolau
  2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-06  9:28 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for asynchronous virtual channel messages, specifically for
inline IPsec messages.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h       |  16 ++++
 drivers/net/iavf/iavf_vchnl.c | 137 +++++++++++++++++++++-------------
 2 files changed, 101 insertions(+), 52 deletions(-)

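To illustrate the accounting used below (a minimal sketch with hypothetical
helper names; the actual logic is in iavf_vchnl.c): a synchronous virtchnl
command expects a single adminq response, while an inline IPsec request is
acknowledged twice, once immediately and once with the asynchronous result,
so the pending command counter starts at 2 and the waiting caller is only
notified when it reaches zero.

#include <rte_atomic.h>

/* Illustrative sketch only; helper names are not part of the patch. */
static inline void
example_set_pending(rte_atomic32_t *pend_cmd_count, int async)
{
	/* one response expected for sync commands, two for async ones */
	rte_atomic32_set(pend_cmd_count, async ? 2 : 1);
}

static inline int
example_adminq_response_handled(rte_atomic32_t *pend_cmd_count)
{
	/* notify the waiter only after the last expected response */
	rte_atomic32_dec(pend_cmd_count);
	return rte_atomic32_read(pend_cmd_count) == 0;
}
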
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index b3bd078111..8c7f7c0bed 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -189,6 +189,7 @@ struct iavf_info {
 	uint64_t supported_rxdid;
 	uint8_t *proto_xtr; /* proto xtr type for all queues */
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+	rte_atomic32_t pend_cmd_count;
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
@@ -340,9 +341,24 @@ _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
+	rte_atomic32_set(&vf->pend_cmd_count, 1);
+
 	return !ret;
 }
 
+/* Check there is pending cmd in execution. If none, set new command. */
+static inline int
+_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+	int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+	if (!ret)
+		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+	rte_atomic32_set(&vf->pend_cmd_count, 2);
+
+	return !ret;
+}
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 7f86050df3..5c62443999 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -23,8 +23,8 @@
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
-#define MAX_TRY_TIMES 200
-#define ASQ_DELAY_MS  10
+#define MAX_TRY_TIMES 2000
+#define ASQ_DELAY_MS  1
 
 static uint32_t
 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
@@ -143,7 +143,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 }
 
 static int
-iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args,
+	int async)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
@@ -155,8 +156,14 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (vf->vf_reset)
 		return -EIO;
 
-	if (_atomic_set_cmd(vf, args->ops))
-		return -1;
+
+	if (async) {
+		if (_atomic_set_async_response_cmd(vf, args->ops))
+			return -1;
+	} else {
+		if (_atomic_set_cmd(vf, args->ops))
+			return -1;
+	}
 
 	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
@@ -252,9 +259,11 @@ static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
@@ -330,18 +339,40 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
 		case iavf_aqc_opc_send_msg_to_vf:
 			if (msg_opc == VIRTCHNL_OP_EVENT) {
 				iavf_handle_pf_event_msg(dev, info.msg_buf,
-							info.msg_len);
+						info.msg_len);
 			} else {
+				/* check for inline IPsec events */
+				struct inline_ipsec_msg *imsg =
+					(struct inline_ipsec_msg *)info.msg_buf;
+				struct rte_eth_event_ipsec_desc desc;
+				if (msg_opc == VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
+					&& imsg->ipsec_opcode ==
+						INLINE_IPSEC_OP_EVENT) {
+					struct virtchnl_ipsec_event *ev =
+							imsg->ipsec_data.event;
+					desc.subtype =
+						RTE_ETH_EVENT_IPSEC_UNKNOWN;
+					desc.metadata = ev->ipsec_event_data;
+					rte_eth_dev_callback_process(dev,
+							RTE_ETH_EVENT_IPSEC,
+							&desc);
+					return;
+				}
+
 				/* read message and it's expected one */
-				if (msg_opc == vf->pend_cmd)
-					_notify_cmd(vf, msg_ret);
-				else
-					PMD_DRV_LOG(ERR, "command mismatch,"
-						    "expect %u, get %u",
-						    vf->pend_cmd, msg_opc);
+				if (msg_opc == vf->pend_cmd) {
+					rte_atomic32_dec(&vf->pend_cmd_count);
+					if (rte_atomic32_read(
+						&vf->pend_cmd_count) == 0)
+						_notify_cmd(vf, msg_ret);
+				} else {
+					PMD_DRV_LOG(ERR,
+					"command mismatch, expect %u, get %u",
+						vf->pend_cmd, msg_opc);
+				}
 				PMD_DRV_LOG(DEBUG,
-					    "adminq response is received,"
-					    " opcode = %d", msg_opc);
+				"adminq response is received, opcode = %d",
+						msg_opc);
 			}
 			break;
 		default:
@@ -365,7 +396,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_ENABLE_VLAN_STRIPPING");
@@ -386,7 +417,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_DISABLE_VLAN_STRIPPING");
@@ -415,7 +446,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
 		return err;
@@ -468,12 +499,13 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
-		VIRTCHNL_VF_OFFLOAD_QOS;
+		VIRTCHNL_VF_OFFLOAD_QOS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -518,7 +550,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
@@ -562,7 +594,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_strip);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
@@ -602,7 +634,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_insert);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
@@ -645,7 +677,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(vlan_filter);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");
@@ -666,7 +698,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
@@ -697,7 +729,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES");
@@ -725,7 +757,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES");
@@ -758,7 +790,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
@@ -800,7 +832,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
@@ -844,7 +876,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
@@ -890,7 +922,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
@@ -922,7 +954,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
@@ -954,7 +986,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
@@ -1046,7 +1078,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
@@ -1087,7 +1119,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
 
@@ -1128,7 +1160,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
 
@@ -1188,7 +1220,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		args.in_args_size = len;
 		args.out_buffer = vf->aq_resp;
 		args.out_size = IAVF_AQ_BUF_SZ;
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		if (err)
 			PMD_DRV_LOG(ERR, "fail to execute command %s",
 				    add ? "OP_ADD_ETHER_ADDRESS" :
@@ -1215,7 +1247,7 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		*pstats = NULL;
@@ -1250,7 +1282,7 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1290,7 +1322,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
@@ -1317,7 +1349,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
@@ -1344,7 +1376,7 @@ iavf_fdir_add(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
 		return err;
@@ -1404,7 +1436,7 @@ iavf_fdir_del(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
 		return err;
@@ -1451,7 +1483,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
 		return err;
@@ -1492,7 +1524,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of %s",
@@ -1515,7 +1547,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
@@ -1541,7 +1573,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_SET_RSS_HENA");
@@ -1562,7 +1594,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1595,7 +1627,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_TC_MAP");
@@ -1640,7 +1672,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 		i * sizeof(struct virtchnl_ether_addr);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
@@ -1685,7 +1717,7 @@ iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
 	 * before iavf_read_msg_from_pf.
 	 */
 	rte_intr_disable(&pci_dev->intr_handle);
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	rte_intr_enable(&pci_dev->intr_handle);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
@@ -1721,7 +1753,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
 		return err;
@@ -1734,3 +1766,4 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 
 	return 0;
 }
+
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v5 4/6] net/iavf: add iAVF IPsec inline crypto support
  2021-10-06  9:28 ` [dpdk-dev] [PATCH v5 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (2 preceding siblings ...)
  2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
@ 2021-10-06  9:28   ` Radu Nicolau
  2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
  2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-06  9:28 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Ray Kinsella
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as the offload for ESP over UDP,
in conjunction with TSO for UDP and TCP flows.
Implement support for rte_security packet metadata.

Add definitions for the IPsec descriptors and extend the offload
support in the data and context descriptors accordingly.

Add support to the virtual channel mailbox for IPsec Crypto request
operations. IPsec Crypto requests receive an initial acknowledgement
from the physical function driver confirming receipt of the request,
followed by an asynchronous response with the success/failure of the
request, including any response data.

Add enhanced descriptor debugging.

Refactor the scalar Tx burst function to support integration of the
offload.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h                       |   10 +
 drivers/net/iavf/iavf_ethdev.c                |   41 +-
 drivers/net/iavf/iavf_generic_flow.c          |   16 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1904 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |   96 +
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  201 +-
 drivers/net/iavf/iavf_rxtx.h                  |   94 +-
 drivers/net/iavf/iavf_vchnl.c                 |   29 +
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 13 files changed, 2762 insertions(+), 21 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

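To put the offload in context, an application typically creates an inline
crypto IPsec SA on the iavf port through the rte_security API, roughly as
in the sketch below. This is a hedged, minimal example: the session mempool
setup, error handling and the exact rte_security_session_create() signature
depend on the DPDK release, and all SA values shown are placeholders.

#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_security.h>
#include <rte_crypto_sym.h>

static struct rte_security_session *
example_create_inline_ipsec_sa(uint16_t port_id, struct rte_mempool *sess_mp,
			       struct rte_mempool *priv_mp)
{
	static uint8_t key[16];	/* placeholder AES-GCM key */
	struct rte_crypto_sym_xform aead = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key, .length = sizeof(key) },
			.iv = { .offset = 0, .length = 8 },
			.digest_length = 16,
		},
	};
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x100,	/* placeholder SPI */
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			/* tunnel endpoint addresses omitted for brevity */
			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
		},
		.crypto_xform = &aead,
	};
	/* security context exposed once DEV_RX/TX_OFFLOAD_SECURITY is set */
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);

	if (ctx == NULL)
		return NULL;
	/* four-argument variant used around DPDK 21.11 */
	return rte_security_session_create(ctx, &conf, sess_mp, priv_mp);
}

The returned session is then attached to egress packets with
rte_security_set_pkt_metadata() and matched on ingress with an rte_flow
rule using the RTE_FLOW_ACTION_TYPE_SECURITY action, which the flow engine
changes in this patch handle.
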
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 8c7f7c0bed..934ef48278 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -217,6 +217,7 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+	struct iavf_parser_list ipsec_crypto_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
@@ -239,6 +240,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_IPV6_FLOW,
 	IAVF_PROTO_XTR_TCP,
 	IAVF_PROTO_XTR_IP_OFFSET,
+	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
 	IAVF_PROTO_XTR_MAX,
 };
 
@@ -250,11 +252,14 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 };
 
+struct iavf_security_ctx;
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
 	struct rte_eth_dev *eth_dev;
 	struct iavf_info vf;
+	struct iavf_security_ctx *security_ctx;
 
 	bool rx_bulk_alloc_allowed;
 	/* For vector PMD */
@@ -273,6 +278,8 @@ struct iavf_adapter {
 	(&((struct iavf_adapter *)adapter)->vf)
 #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct iavf_adapter *)adapter)->hw)
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+	(((struct iavf_adapter *)adapter)->security_ctx)
 
 /* IAVF_VSI_TO */
 #define IAVF_VSI_TO_HW(vsi) \
@@ -415,5 +422,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			uint16_t size);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index c131461517..294be1a022 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -29,6 +29,7 @@
 #include "iavf_rxtx.h"
 #include "iavf_generic_flow.h"
 #include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
@@ -70,6 +71,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
 	[IAVF_PROTO_XTR_IP_OFFSET] = {
 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
 		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+		.param = {
+		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+		.ol_flag =
+			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
 };
 
 static int iavf_dev_configure(struct rte_eth_dev *dev);
@@ -922,6 +928,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	/* free the iAVF security device context and all related resources */
+	iavf_security_ctx_destroy(adapter);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -931,7 +940,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
@@ -974,6 +985,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
 
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+	}
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -1730,6 +1746,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
 		{ "tcp",       IAVF_PROTO_XTR_TCP       },
 		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
 	};
 	uint32_t i;
 
@@ -1738,8 +1755,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 			return xtr_type_map[i].type;
 	}
 
-	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
-		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+			"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
 
 	return -1;
 }
@@ -2357,6 +2374,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		goto flow_init_err;
 	}
 
+	/** Check if the IPsec Crypto offload is supported and create
+	 *  security_ctx if it is.
+	 */
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		/* Initialize security_ctx only for primary process */
+		ret = iavf_security_ctx_create(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+			return ret;
+		}
+
+		ret = iavf_security_init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
+			return ret;
+		}
+	}
+
 	iavf_default_rss_disable(adapter);
 
 	return 0;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index 1fe270fb22..d85e82a950 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1635,6 +1635,7 @@ iavf_flow_init(struct iavf_adapter *ad)
 	TAILQ_INIT(&vf->flow_list);
 	TAILQ_INIT(&vf->rss_parser_list);
 	TAILQ_INIT(&vf->dist_parser_list);
+	TAILQ_INIT(&vf->ipsec_crypto_parser_list);
 	rte_spinlock_init(&vf->flow_ops_lock);
 
 	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
@@ -1709,6 +1710,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
+		list = &vf->ipsec_crypto_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
@@ -2018,6 +2022,14 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 
 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
 				    actions, error);
+	if (*engine)
+		return 0;
+
+	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
+			pattern, actions, error);
+	if (*engine)
+		return 0;
+
 
 	if (!*engine) {
 		rte_flow_error_set(error, EINVAL,
@@ -2064,6 +2076,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	/* Special case for inline crypto egress flows */
+	if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY)
+		goto free_flow;
+
 	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
 			&engine, iavf_parse_engine_create, error);
 	if (ret < 0) {
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 4794d1fb80..a471c0331f 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -449,6 +449,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 /* engine types. */
 enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_NONE = 0,
+	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
 	IAVF_FLOW_ENGINE_MAX,
@@ -462,6 +463,7 @@ enum iavf_flow_engine_type {
  */
 enum iavf_flow_classification_stage {
 	IAVF_FLOW_STAGE_NONE = 0,
+	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
 	IAVF_FLOW_STAGE_RSS,
 	IAVF_FLOW_STAGE_DISTRIBUTOR,
 	IAVF_FLOW_STAGE_MAX,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
new file mode 100644
index 0000000000..9635b41679
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -0,0 +1,1904 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+#include <rte_security_driver.h>
+#include <rte_security.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_log.h"
+#include "iavf_generic_flow.h"
+
+#include "iavf_ipsec_crypto.h"
+#include "iavf_ipsec_crypto_capabilities.h"
+
+/**
+ * iAVF IPsec Crypto Security Context
+ */
+struct iavf_security_ctx {
+	struct iavf_adapter *adapter;
+	int pkt_md_offset;
+	struct rte_cryptodev_capabilities *crypto_capabilities;
+};
+
+/**
+ * iAVF IPsec Crypto Security Session Parameters
+ */
+struct iavf_security_session {
+	struct iavf_adapter *adapter;
+
+	enum rte_security_ipsec_sa_mode mode;
+	enum rte_security_ipsec_tunnel_type type;
+	enum rte_security_ipsec_sa_direction direction;
+
+	struct {
+		uint32_t spi; /* Security Parameter Index */
+		uint32_t hw_idx; /* SA Index in hardware table */
+	} sa;
+
+	struct {
+		uint8_t enabled :1;
+		union {
+			uint64_t value;
+			struct {
+				uint32_t hi;
+				uint32_t low;
+			};
+		};
+	} esn;
+
+	struct {
+		uint8_t enabled :1;
+	} udp_encap;
+
+	size_t iv_sz;
+	size_t icv_sz;
+	size_t block_sz;
+
+	struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
+};
+/**
+ *  IV Length field in IPsec Tx Desc uses the following encoding:
+ *
+ *  0B - 0
+ *  4B - 1
+ *  8B - 2
+ *  16B - 3
+ *
+ * but we also need the IV length for TSO to correctly calculate the total
+ * header length, so it is also placed in the upper 6 bits here for easier
+ * retrieval.
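+ * For example, using the table above, a 16B IV is encoded as
+ * (16 << 2) | 3 = 0x43.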
+ */
+static inline uint8_t
+calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
+{
+	uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
+
+	switch (iv_sz) {
+	case 4:
+		iv_length = IAVF_IPSEC_IV_LEN_DW;
+		break;
+	case 8:
+		iv_length = IAVF_IPSEC_IV_LEN_DDW;
+		break;
+	case 16:
+		iv_length = IAVF_IPSEC_IV_LEN_QDW;
+		break;
+	}
+
+	return (iv_sz << 2) | iv_length;
+}
+
+
+static unsigned int
+iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
+{
+	return sizeof(struct iavf_security_session);
+}
+
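+/* Walk the security context's crypto capability table and return the
+ * symmetric capability entry matching the given transform type and
+ * algorithm, or NULL if the algorithm is not supported.
+ */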
+static const struct rte_cryptodev_symmetric_capability *
+get_capability(struct iavf_security_ctx *iavf_sctx,
+	uint32_t algo, uint32_t type)
+{
+	const struct rte_cryptodev_capabilities *capability;
+	int i = 0;
+
+	capability = &iavf_sctx->crypto_capabilities[i];
+
+	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+			capability->sym.xform_type == type &&
+			capability->sym.cipher.algo == algo)
+			return &capability->sym;
+		/** try next capability */
+		capability = &iavf_crypto_capabilities[i++];
+	}
+
+	return NULL;
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_auth_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
+}
+static const struct rte_cryptodev_symmetric_capability *
+get_aead_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
+}
+
+static uint16_t
+get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->auth.block_size;
+}
+
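+/* Map the cipher block size in bytes to the 2-bit encoding used in the
+ * context descriptor: 8B -> 0x2, 16B -> 0x3, anything else -> 0x0.
+ */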
+static uint8_t
+calc_context_desc_cipherblock_sz(size_t len)
+{
+	switch (len) {
+	case 8:
+		return 0x2;
+	case 16:
+		return 0x3;
+	default:
+		return 0x0;
+	}
+}
+
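+/* Check that a key length lies within [min, max], is aligned to the
+ * capability increment and fits within the virtchnl key array.
+ */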
+static int
+valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
+{
+	if (len < min || len > max)
+		return false;
+
+	if (increment == 0)
+		return true;
+
+	if ((len - min) % increment)
+		return false;
+
+	/* make sure it fits in the key array */
+	if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
+		return false;
+
+	return true;
+}
+
+static int
+valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_auth_xform *auth)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, auth->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(auth->key.length,
+		capability->auth.key_size.min,
+		capability->auth.key_size.max,
+		capability->auth.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_cipher_xform *cipher)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, cipher->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(cipher->key.length,
+		capability->cipher.key_size.min,
+		capability->cipher.key_size.max,
+		capability->cipher.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_aead_xform *aead)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, aead->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(aead->key.length,
+		capability->aead.key_size.min,
+		capability->aead.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
+	struct rte_security_session_conf *conf)
+{
+	/** validate security action/protocol selection */
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
+		PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate IPsec protocol selection */
+	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate selected options */
+	if (conf->ipsec.options.copy_dscp ||
+		conf->ipsec.options.copy_flabel ||
+		conf->ipsec.options.copy_df ||
+		conf->ipsec.options.dec_ttl ||
+		conf->ipsec.options.ecn ||
+		conf->ipsec.options.stats) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+		return -EINVAL;
+	}
+
+	/**
+	 * Validate crypto xforms parameters.
+	 *
+	 * AEAD transforms can be used for either inbound/outbound IPsec SAs,
+	 * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
+	 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
+	 */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_auth_xform(iavf_sctx,
+				&conf->crypto_xform->next->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->next->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_aead_xform *aead, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AEAD;
+
+	switch (aead->algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		cfg->algo_type = VIRTCHNL_AES_CCM; break;
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		cfg->algo_type = VIRTCHNL_AES_GCM; break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
+		break;
+	}
+
+	cfg->key_len = aead->key.length;
+	cfg->iv_len = aead->iv.length;
+	cfg->digest_len = aead->digest_length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, aead->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_cipher_xform *cipher, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_CIPHER;
+
+	switch (cipher->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cfg->algo_type = VIRTCHNL_AES_CBC; break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cfg->algo_type = VIRTCHNL_3DES_CBC; break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cfg->algo_type = VIRTCHNL_AES_CTR;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid cipher parameters");
+		break;
+	}
+
+	cfg->key_len = cipher->key.length;
+	cfg->iv_len = cipher->iv.length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
+}
+
+
+static void
+sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_auth_xform *auth, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AUTH;
+
+	switch (auth->algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+		cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		cfg->algo_type = VIRTCHNL_AES_CMAC; break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		cfg->algo_type = VIRTCHNL_AES_GMAC;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid auth parameters");
+		break;
+	}
+
+	cfg->key_len = auth->key.length;
+	cfg->iv_len = auth->iv.length;
+	cfg->digest_len = auth->digest_length;
+
+	memcpy(cfg->key_data, auth->key.data, cfg->key_len);
+}
+
+/**
+ * Send SA add virtual channel request to Inline IPsec driver.
+ *
+ * Inline IPsec driver expects SPI and destination IP address to be in host
+ * order, but DPDK APIs are network order, therefore we need to do a htonl
+ * conversion of these parameters.
+ */
+static uint32_t
+iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
+	struct rte_security_session_conf *conf)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	struct virtchnl_ipsec_sa_cfg *sa_cfg;
+	size_t request_len, response_len;
+
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg);
+
+	request = rte_malloc("iavf-sad-add-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
+	response = rte_malloc("iavf-sad-add-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set SA configuration params */
+	sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
+
+	sa_cfg->spi = conf->ipsec.spi;
+	sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
+	sa_cfg->virtchnl_direction =
+		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+			VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
+
+	if (conf->ipsec.options.esn) {
+		sa_cfg->esn_enabled = 1;
+		sa_cfg->esn_hi = conf->ipsec.esn.hi;
+		sa_cfg->esn_low = conf->ipsec.esn.low;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sa_cfg->udp_encap_enabled = 1;
+
+	/* Set outer IP params */
+	if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
+
+		*((uint32_t *)sa_cfg->dst_addr)	=
+			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
+	} else {
+		uint32_t *v6_dst_addr =
+			conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
+
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
+
+		((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
+		((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
+		((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
+		((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
+	}
+
+	/* set crypto params */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->aead, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->cipher, conf->ipsec.salt);
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->auth, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->auth, conf->ipsec.salt);
+		if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
+			sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->cipher, conf->ipsec.salt);
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sa_cfg_resp->sa_handle;
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static void
+set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
+	struct iavf_security_session *sess)
+{
+	template->sa_idx = sess->sa.hw_idx;
+	template->ol_flags = 0;
+
+	if (sess->udp_encap.enabled)
+		template->ol_flags |= IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
+
+	if (sess->esn.enabled)
+		template->ol_flags |= IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
+
+	template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
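+	/* pack the cipher block size encoding (low bits) together with the
+	 * ICV length in 4-byte units (shifted left by 3 bits)
+	 */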
+	template->ctx_desc_ipsec_params =
+			calc_context_desc_cipherblock_sz(sess->block_sz) |
+			((uint8_t)(sess->icv_sz >> 2) << 3);
+}
+
+static void
+set_session_parameter(struct iavf_security_ctx *iavf_sctx,
+	struct iavf_security_session *sess,
+	struct rte_security_session_conf *conf, uint32_t sa_idx)
+{
+	sess->adapter = iavf_sctx->adapter;
+
+	sess->mode = conf->ipsec.mode;
+	sess->direction = conf->ipsec.direction;
+
+	if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		sess->type = conf->ipsec.tunnel.type;
+
+	sess->sa.spi = conf->ipsec.spi;
+	sess->sa.hw_idx = sa_idx;
+
+	if (conf->ipsec.options.esn) {
+		sess->esn.enabled = 1;
+		sess->esn.value = conf->ipsec.esn.value;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sess->udp_encap.enabled = 1;
+
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sess->block_sz = get_aead_blocksize(iavf_sctx,
+			conf->crypto_xform->aead.algo);
+		sess->iv_sz = conf->crypto_xform->aead.iv.length;
+		sess->icv_sz = conf->crypto_xform->aead.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sess->block_sz = get_cipher_blocksize(iavf_sctx,
+			conf->crypto_xform->cipher.algo);
+		sess->iv_sz = conf->crypto_xform->cipher.iv.length;
+		sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			sess->block_sz = get_auth_blocksize(iavf_sctx,
+				RTE_CRYPTO_SYM_XFORM_AUTH);
+			sess->iv_sz = conf->crypto_xform->auth.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		} else {
+			sess->block_sz = get_cipher_blocksize(iavf_sctx,
+				conf->crypto_xform->next->cipher.algo);
+			sess->iv_sz =
+				conf->crypto_xform->next->cipher.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		}
+	}
+
+	set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
+}
+
+/**
+ * Create IPsec Security Association for inline IPsec Crypto offload.
+ *
+ * 1. validate session configuration parameters
+ * 2. allocate session memory from mempool
+ * 3. add SA to hardware database
+ * 4. set session parameters
+ * 5. create packet metadata template for datapath
+ */
+static int
+iavf_ipsec_crypto_session_create(void *device,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *session,
+				 struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_session = NULL;
+	int sa_idx;
+	int ret = 0;
+
+	/* validate that all SA parameters are valid for device */
+	ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
+	if (ret)
+		return ret;
+
+	/* allocate session context */
+	if (rte_mempool_get(mempool, (void **)&iavf_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
+		return -ENOMEM;
+	}
+
+	/* add SA to hardware database */
+	sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
+	if (sa_idx < 0) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add SA (spi: %d, mode: %s, direction: %s)",
+			conf->ipsec.spi,
+			conf->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
+				"transport" : "tunnel",
+			conf->ipsec.direction ==
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+				"inbound" : "outbound");
+
+		rte_mempool_put(mempool, iavf_session);
+		return -EFAULT;
+	}
+
+	/* save data plane required session parameters */
+	set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
+
+	/* save to security session private data */
+	set_sec_session_private_data(session, iavf_session);
+
+	return 0;
+}
+
+/**
+ * Check if valid ipsec crypto action.
+ * SPI must be non-zero and SPI in session must match SPI value
+ * passed into function.
+ *
+ * returns: 0 if invalid session or SPI value equal zero
+ * returns: 1 if valid
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_session *sess = session->sess_private_data;
+
+	/* verify we have a valid session and that it belong to this adapter */
+	if (unlikely(sess == NULL || sess->adapter != adapter))
+		return false;
+
+	/* SPI value must be non-zero */
+	if (spi == 0)
+		return false;
+	/* Session SPI must match flow SPI */
+	else if (sess->sa.spi == spi) {
+		return true;
+		/**
+		 * TODO: We should add a way of tracking valid hw SA indices to
+		 * make validation less brittle
+		 */
+	}
+
+	return true;
+}
+
+
+/**
+ * Send virtual channel security policy add request to IES driver.
+ *
+ * IES driver expects SPI and destination IP address to be in host
+ * order, but DPDK APIs are network order, therefore we need to do a htonl
+ * conversion of these parameters.
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg);
+	request = rte_malloc("iavf-inbound-security-policy-add-request",
+				request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* ESP SPI */
+	request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
+
+	/* Destination IP  */
+	if (is_v4) {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
+		request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
+	} else {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+		request->ipsec_data.sp_cfg->dip[0] =
+				htonl(((uint32_t *)v6_dst_addr)[0]);
+		request->ipsec_data.sp_cfg->dip[1] =
+				htonl(((uint32_t *)v6_dst_addr)[1]);
+		request->ipsec_data.sp_cfg->dip[2] =
+				htonl(((uint32_t *)v6_dst_addr)[2]);
+		request->ipsec_data.sp_cfg->dip[3] =
+				htonl(((uint32_t *)v6_dst_addr)[3]);
+	}
+
+	request->ipsec_data.sp_cfg->drop = drop;
+
+	/** Traffic Class/Congestion Domain currently not supported */
+	request->ipsec_data.sp_cfg->set_tc = 0;
+	request->ipsec_data.sp_cfg->cgd = 0;
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
+	response = rte_malloc("iavf-inbound-security-policy-add-response",
+				response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sp_cfg_resp->rule_id;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_update);
+	request = rte_malloc("iavf-sa-update-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sa-update-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set request params */
+	request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
+	request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_resp->resp;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_update(void *device,
+		struct rte_security_session *session,
+		struct rte_security_session_conf *conf)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int rc = 0;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belong to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* update esn hi 32-bits */
+	if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
+		/**
+		 * Update ESN in hardware for inbound SA. Store in
+		 * iavf_security_session for outbound SA for use
+		 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
+		 */
+		if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+			rc = iavf_ipsec_crypto_sa_update_esn(adapter,
+					iavf_sess);
+		else
+			iavf_sess->esn.hi = conf->ipsec.esn.hi;
+	}
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
+		struct rte_security_session *session __rte_unused,
+		struct rte_security_stats *stats __rte_unused)
+{
+	return -EOPNOTSUPP;
+}
+
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_destroy);
+	request = rte_malloc("iavf-sp-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sp-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set security policy params */
+	request->ipsec_data.sp_destroy->table_id = is_v4 ?
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+	request->ipsec_data.sp_destroy->rule_id = flow_id;
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		return response->ipsec_data.ipsec_status->status;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_destroy);
+
+	request = rte_malloc("iavf-sa-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+
+	response = rte_malloc("iavf-sa-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/**
+	 * SA delete supports deletion of 1-8 specified SAs, or, if the flag
+	 * field is zero, all SAs associated with the VF will be deleted.
+	 */
+	if (sess) {
+		request->ipsec_data.sa_destroy->flag = 0x1;
+		request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
+	} else {
+		request->ipsec_data.sa_destroy->flag = 0x0;
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+
+	/**
+	 * The delete status will be the same bitmask as the sa_destroy request
+	 * flag if the delete was successful.
+	 */
+	if (request->ipsec_data.sa_destroy->flag !=
+			response->ipsec_data.ipsec_status->status)
+		rc = -EFAULT;
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+
+static int
+iavf_ipsec_crypto_session_destroy(void *device,
+		struct rte_security_session *session)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int ret;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
+	rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
+	return ret;
+}
+
+/**
+ * Get the ESP trailer from the packet as well as calculate the total ESP
+ * trailer length, which includes the padding, the ESP trailer footer and
+ * the ICV
+ */
+static inline struct rte_esp_tail *
+iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t *esp_trailer_length)
+{
+	struct rte_esp_tail *esp_trailer;
+
+	uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
+	uint16_t offset = 0;
+
+	/**
+	 * The ICV will not be present in TSO packets as this is appended by
+	 * hardware during segment generation
+	 */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		length -= s->icv_sz;
+
+	*esp_trailer_length = length;
+
+	/**
+	 * Calculate the offset in the packet to the ESP trailer header; this
+	 * is the total packet length less the size of the ESP trailer plus
+	 * the ICV length, if the ICV is present
+	 */
+	offset = rte_pktmbuf_pkt_len(m) - length;
+
+	if (m->nb_segs > 1) {
+		/* find the segment in which the ESP trailer is located */
+		while (m->data_len < offset) {
+			offset -= m->data_len;
+			m = m->next;
+		}
+	}
+
+	esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
+
+	*esp_trailer_length += esp_trailer->pad_len;
+
+	return esp_trailer;
+}
+
+
+static inline uint16_t
+iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t esp_tlen)
+{
+	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
+	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
+	uint16_t ol4_len = 0;		/* UDP NATT */
+	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
+	uint16_t l4_len = 0;		/* TCP/UDP/SCTP hdrs */
+	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
+
+	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ol3_len = m->outer_l3_len;
+		/**<
+		 * The application-provided outer_l3_len is assumed to include
+		 * the length of the IPv4/6 header plus any extension headers.
+		 */
+
+	if (s->udp_encap.enabled)
+		ol4_len = sizeof(struct rte_udp_hdr);
+
+	l3_len = m->l3_len;
+	l4_len = m->l4_len;
+
+	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
+			esp_hlen + l3_len + l4_len + esp_tlen);
+}
+
+
+static int
+iavf_ipsec_crypto_pkt_metadata_set(void *device,
+			 struct rte_security_session *session,
+			 struct rte_mbuf *m, void *params)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_sess = session->sess_private_data;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
+	struct rte_esp_tail *esp_tail;
+	uint64_t *sqn = params;
+	uint16_t esp_trailer_length;
+
+	/* Check we have a valid session and it is associated with this device */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* Get dynamic metadata location from mbuf */
+	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
+		struct iavf_ipsec_crypto_pkt_metadata *);
+
+	/* Set immutable metadata values from session template */
+	memcpy(md, &iavf_sess->pkt_metadata_template,
+		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
+
+	esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
+			&esp_trailer_length);
+
+	/* Set per packet mutable metadata values */
+	md->esp_trailer_len = esp_trailer_length;
+	md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
+				iavf_sess, esp_trailer_length);
+	md->next_proto = esp_tail->next_proto;
+
+	/* If Extended SN in use set the upper 32-bits in metadata */
+	if (iavf_sess->esn.enabled && sqn != NULL)
+		md->esn = (uint32_t)(*sqn >> 32);
+
+	return 0;
+}
+
+static int
+iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
+		struct virtchnl_ipsec_cap *capability)
+{
+	/* Perform pf-vf comms */
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg);
+
+	request = rte_malloc("iavf-device-capability-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_cap);
+	response = rte_malloc("iavf-device-capability-response",
+			response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id) {
+		rc = -EFAULT;
+		goto update_cleanup;
+	}
+	memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+
+enum rte_crypto_auth_algorithm auth_maptbl[] = {
+	/* Hash Algorithm */
+	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
+	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
+	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
+	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
+	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
+	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
+	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
+	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
+	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
+	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+};
+
+static void
+update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
+		struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	capability->auth.algo = auth_maptbl[acap->algo_type];
+	capability->auth.block_size = acap->block_size;
+
+	capability->auth.key_size.min = acap->min_key_size;
+	capability->auth.key_size.max = acap->max_key_size;
+	capability->auth.key_size.increment = acap->inc_key_size;
+
+	capability->auth.digest_size.min = acap->min_digest_size;
+	capability->auth.digest_size.max = acap->max_digest_size;
+	capability->auth.digest_size.increment = acap->inc_digest_size;
+}
+
+enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
+	/* Cipher Algorithm */
+	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
+	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
+	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
+	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
+};
+
+
+static void
+update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+	capability->cipher.algo = cipher_maptbl[acap->algo_type];
+
+	capability->cipher.block_size = acap->block_size;
+
+	capability->cipher.key_size.min = acap->min_key_size;
+	capability->cipher.key_size.max = acap->max_key_size;
+	capability->cipher.key_size.increment = acap->inc_key_size;
+
+	capability->cipher.iv_size.min = acap->min_iv_size;
+	capability->cipher.iv_size.max = acap->max_iv_size;
+	capability->cipher.iv_size.increment = acap->inc_iv_size;
+}
+
+enum rte_crypto_aead_algorithm aead_maptbl[] = {
+	/* AEAD Algorithm */
+	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
+	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
+	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+};
+
+static void
+update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	capability->aead.algo = aead_maptbl[acap->algo_type];
+
+	capability->aead.block_size = acap->block_size;
+
+	capability->aead.key_size.min = acap->min_key_size;
+	capability->aead.key_size.max = acap->max_key_size;
+	capability->aead.key_size.increment = acap->inc_key_size;
+
+	capability->aead.aad_size.min = acap->min_aad_size;
+	capability->aead.aad_size.max = acap->max_aad_size;
+	capability->aead.aad_size.increment = acap->inc_aad_size;
+
+	capability->aead.iv_size.min = acap->min_iv_size;
+	capability->aead.iv_size.max = acap->max_iv_size;
+	capability->aead.iv_size.increment = acap->inc_iv_size;
+
+	capability->aead.digest_size.min = acap->min_digest_size;
+	capability->aead.digest_size.max = acap->max_digest_size;
+	capability->aead.digest_size.increment = acap->inc_digest_size;
+}
+
+
+/**
+ * Dynamically set crypto capabilities based on virtchannel IPsec
+ * capabilities structure.
+ */
+int
+iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
+{
+	struct rte_cryptodev_capabilities *capabilities;
+	int i, j, number_of_capabilities = 0, ci = 0;
+
+	/* Count the total number of crypto algorithms supported */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
+		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
+
+	/**
+	 * Allocate cryptodev capabilities structure for
+	 * *number_of_capabilities* items plus one item to null terminate the
+	 * array
+	 */
+	capabilities = rte_zmalloc("crypto_cap",
+		sizeof(struct rte_cryptodev_capabilities) *
+		(number_of_capabilities + 1), 0);
+	if (capabilities == NULL)
+		return -ENOMEM;
+
+	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
+
+	/**
+	 * Iterate over each virtchnl crypto capability by crypto type and
+	 * algorithm.
+	 */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
+		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
+			switch (vch_cap->cap[i].crypto_type) {
+			case VIRTCHNL_AUTH:
+				update_auth_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_CIPHER:
+				update_cipher_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_AEAD:
+				update_aead_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			default:
+				capabilities[ci].op =
+						RTE_CRYPTO_OP_TYPE_UNDEFINED;
+				break;
+			}
+		}
+	}
+
+	iavf_sctx->crypto_capabilities = capabilities;
+	return 0;
+}
+
+/**
+ * Get security capabilities for device
+ */
+static const struct rte_security_capability *
+iavf_ipsec_crypto_capabilities_get(void *device)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	unsigned int i;
+
+	static struct rte_security_capability iavf_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 }
+			},
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	/**
+	 * Update the security capabilities struct with the runtime discovered
+	 * crypto capabilities, except for the last element of the array which
+	 * is the null termination
+	 */
+	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
+			sizeof(iavf_security_capabilities[0])) - 1); i++) {
+		iavf_security_capabilities[i].crypto_capabilities =
+			iavf_sctx->crypto_capabilities;
+	}
+
+	return iavf_security_capabilities;
+}
+
+static struct rte_security_ops iavf_ipsec_crypto_ops = {
+	.session_get_size		= iavf_ipsec_crypto_session_size_get,
+	.session_create			= iavf_ipsec_crypto_session_create,
+	.session_update			= iavf_ipsec_crypto_session_update,
+	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
+	.session_destroy		= iavf_ipsec_crypto_session_destroy,
+	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
+	.get_userdata			= NULL,
+	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
+};
+}
+int
+iavf_security_ctx_create(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx;
+
+	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
+	uint16_t status1 = rte_le_to_cpu_16(rxdp->wb.status_error1);
+		return -ENOMEM;
+
+	sctx->device = adapter->eth_dev;
+	sctx->ops = &iavf_ipsec_crypto_ops;
+	sctx->sess_cnt = 0;
+
+	adapter->eth_dev->security_ctx = sctx;
+
+	if (adapter->security_ctx == NULL) {
+		adapter->security_ctx = rte_malloc("iavf_security_ctx",
+				sizeof(struct iavf_security_ctx), 0);
+		if (adapter->security_ctx == NULL)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int
+iavf_security_init(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct rte_mbuf_dynfield pkt_md_dynfield = {
+		.name = "iavf_ipsec_crypto_pkt_metadata",
+		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
+		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
+	};
+	struct virtchnl_ipsec_cap capabilities;
+	int rc;
+
+	iavf_sctx->adapter = adapter;
+
+	iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
+	if (iavf_sctx->pkt_md_offset < 0)
+		return iavf_sctx->pkt_md_offset;
+
+	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
+	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
+	if (rc)
+		return rc;
+
+	return	iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
+			&capabilities);
+}
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	return iavf_sctx->pkt_md_offset;
+}
+
+int
+iavf_security_ctx_destroy(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx  = adapter->eth_dev->security_ctx;
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	if (iavf_sctx == NULL)
+		return -ENODEV;
+
+	/* TODO: Add resources cleanup */
+
+	/* free and reset security data structures */
+	rte_free(iavf_sctx);
+	rte_free(sctx);
+
+	iavf_sctx = NULL;
+	sctx = NULL;
+
+	return 0;
+}
+
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
+{
+	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
+
+	/** Capability check for IPsec Crypto */
+	if (resources && (resources->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
+		return true;
+
+	return false;
+}
+
+
+#define IAVF_IPSEC_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
+enum iavf_ipsec_flow_pt_type {
+	IAVF_PATTERN_ESP = 1,
+	IAVF_PATTERN_AH,
+	IAVF_PATTERN_UDP_ESP,
+};
+enum iavf_ipsec_flow_pt_ip_ver {
+	IAVF_PATTERN_IPV4 = 1,
+	IAVF_PATTERN_IPV6,
+};
+
+#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
+#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
+#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
+
+static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
+	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
+};
+
+struct iavf_ipsec_flow_item {
+	uint64_t id;
+	uint8_t is_ipv4;
+	uint32_t spi;
+	struct rte_ether_hdr eth_hdr;
+	union {
+		struct rte_ipv4_hdr ipv4_hdr;
+		struct rte_ipv6_hdr ipv6_hdr;
+	};
+	struct rte_udp_hdr udp_hdr;
+};
+
+static void
+parse_eth_item(const struct rte_flow_item_eth *item,
+		struct rte_ether_hdr *eth)
+{
+	memcpy(eth->s_addr.addr_bytes,
+			item->src.addr_bytes, sizeof(eth->s_addr));
+	memcpy(eth->d_addr.addr_bytes,
+			item->dst.addr_bytes, sizeof(eth->d_addr));
+}
+
+static void
+parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
+		struct rte_ipv4_hdr *ipv4)
+{
+	ipv4->src_addr = item->hdr.src_addr;
+	ipv4->dst_addr = item->hdr.dst_addr;
+}
+
+static void
+parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
+		struct rte_ipv6_hdr *ipv6)
+{
+	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
+	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+}
+
+static void
+parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
+{
+	udp->dst_port = item->hdr.dst_port;
+	udp->src_port = item->hdr.src_port;
+}
+
+static int
+has_security_action(const struct rte_flow_action actions[],
+	const void **session)
+{
+	/* only {SECURITY; END} supported */
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
+		*session = actions[0].conf;
+		return true;
+	}
+	return false;
+}
+
+
+static struct iavf_ipsec_flow_item *
+iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t type)
+{
+	const void *session;
+	struct iavf_ipsec_flow_item
+		*ipsec_flow = rte_malloc("security-flow-rule",
+		sizeof(struct iavf_ipsec_flow_item), 0);
+	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
+	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
+
+	if (ipsec_flow == NULL)
+		return NULL;
+
+	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
+
+	if (pattern[0].spec)
+		parse_eth_item((const struct rte_flow_item_eth *)
+				pattern[0].spec, &ipsec_flow->eth_hdr);
+
+	switch (p_type) {
+	case IAVF_PATTERN_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[2].spec)->hdr.spi;
+		break;
+	case IAVF_PATTERN_AH:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_ah *)
+					pattern[2].spec)->spi;
+		break;
+	case IAVF_PATTERN_UDP_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		parse_udp_item((const struct rte_flow_item_udp *)
+				pattern[2].spec,
+			&ipsec_flow->udp_hdr);
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[3].spec)->hdr.spi;
+		break;
+	default:
+		goto flow_cleanup;
+	}
+
+
+	if (!has_security_action(actions, &session))
+		goto flow_cleanup;
+
+	if (!iavf_ipsec_crypto_action_valid(ethdev, session,
+			ipsec_flow->spi))
+		goto flow_cleanup;
+
+	return ipsec_flow;
+
+flow_cleanup:
+	rte_free(ipsec_flow);
+	return NULL;
+}
+
+
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser;
+
+static int
+iavf_ipsec_flow_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
+		parser = &iavf_ipsec_flow_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
+}
+
+static int
+iavf_ipsec_flow_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = meta;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	if (ipsec_flow->is_ipv4) {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			1,
+			ipsec_flow->ipv4_hdr.dst_addr,
+			NULL,
+			0);
+	} else {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			0,
+			0,
+			ipsec_flow->ipv6_hdr.dst_addr,
+			0);
+	}
+
+	if (ipsec_flow->id < 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Failed to add SA.");
+		return -rte_errno;
+	}
+
+	flow->rule = ipsec_flow;
+
+	return 0;
+}
+
+static int
+iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	iavf_ipsec_crypto_security_policy_delete(ad,
+			ipsec_flow->is_ipv4, ipsec_flow->id);
+	rte_free(ipsec_flow);
+	return 0;
+}
+
+static struct iavf_flow_engine iavf_ipsec_flow_engine = {
+	.init = iavf_ipsec_flow_init,
+	.uninit = iavf_ipsec_flow_uninit,
+	.create = iavf_ipsec_flow_create,
+	.destroy = iavf_ipsec_flow_destroy,
+	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
+};
+
+static int
+iavf_ipsec_flow_parse(struct iavf_adapter *ad,
+		       struct iavf_pattern_match_item *array,
+		       uint32_t array_len,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta,
+		       struct rte_flow_error *error)
+{
+	struct iavf_pattern_match_item *item = NULL;
+	int ret = -1;
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (item && item->meta) {
+		uint32_t type = (uint64_t)(item->meta);
+		struct iavf_ipsec_flow_item *fi =
+				iavf_ipsec_flow_item_parse(ad->eth_dev,
+						pattern, actions, type);
+		if (fi && meta) {
+			*meta = fi;
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser = {
+	.engine = &iavf_ipsec_flow_engine,
+	.array = iavf_ipsec_flow_pattern,
+	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
+	.parse_pattern_action = iavf_ipsec_flow_parse,
+	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
+};
+
+RTE_INIT(iavf_ipsec_flow_engine_register)
+{
+	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
+}
+
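As a usage illustration, the parser above accepts only an {ETH, IPv4/IPv6[, UDP], ESP/AH}
pattern followed by a {SECURITY, END} action pair. A minimal sketch of installing a
matching inbound IPv4 ESP rule is given below; the port id, destination address, SPI and
the previously created inline-crypto session are illustrative assumptions only.

#include <rte_byteorder.h>
#include <rte_flow.h>
#include <rte_security.h>

/* Sketch: bind an inbound IPv4 ESP flow to an inline-crypto session. */
static struct rte_flow *
install_inbound_esp_rule(uint16_t port_id, struct rte_security_session *sess,
			 rte_be32_t dst_addr, rte_be32_t spi)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = { .hdr = { .dst_addr = dst_addr } };
	struct rte_flow_item_esp esp_spec = { .hdr = { .spi = spi } };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_ESP, .spec = &esp_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = sess },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}

The parse and create callbacks above then extract the SPI and destination address from
these pattern items and program the inbound security policy for the session.
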
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
new file mode 100644
index 0000000000..d8d7d6649e
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_H_
+#define _IAVF_IPSEC_CRYPTO_H_
+
+#include <rte_security.h>
+
+#include "iavf.h"
+
+/* IPsec Crypto Packet Metadata offload flags */
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
+
+/**
+ * Packet metadata structure used to hold the parameters required by the iAVF
+ * transmit data path. Parameters are set per packet for a session by calling
+ * the rte_security_set_pkt_metadata() API.
+ */
+struct iavf_ipsec_crypto_pkt_metadata {
+	uint32_t sa_idx;                /* SA hardware index (20b/4B) */
+
+	uint8_t ol_flags;		/* flags (1B) */
+	uint8_t len_iv;			/* IV length (2b/1B) */
+	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
+	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
+
+	uint16_t l4_payload_len;	/* L4 payload length */
+	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extension headers len (5b/1B) */
+	uint8_t next_proto;		/* Next Protocol (8b/1B) */
+
+	uint32_t esn;		        /* Extended Sequence Number (32b/4B) */
+} __rte_packed;
+
+/**
+ * Inline IPsec Crypto offload is supported
+ */
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
+
+/**
+ * Create security context
+ */
+int iavf_security_ctx_create(struct iavf_adapter *adapter);
+
+/**
+ * Initialize security context
+ */
+int iavf_security_init(struct iavf_adapter *adapter);
+
+/**
+ * Set security capabilities
+ */
+int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchl_capabilities);
+
+
+int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+/**
+ * Destroy security context
+ */
+int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
+
+/**
+ * Verify that the inline IPsec Crypto action is valid for this device
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi);
+
+/**
+ * Add inbound security policy rule to hardware
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop);
+
+/**
+ * Delete inbound security policy rule from hardware
+ */
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id);
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+#endif /* _IAVF_IPSEC_CRYPTO_H_ */
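The metadata structure declared above is written by the PMD's set_pkt_metadata callback,
and the egress capabilities advertise RTE_SECURITY_TX_OLOAD_NEED_MDATA, so an application
must call rte_security_set_pkt_metadata() on every outbound packet before transmission.
A rough egress-side sketch follows, assuming the DPDK 21.11-era rte_security API
(two-mempool session create); the session configuration, mempools and sequence number
are caller-supplied assumptions.

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

/* Sketch: create an inline-crypto session and tag an outbound mbuf with the
 * per-packet metadata consumed by the iAVF TX path.
 */
static int
tx_one_with_inline_crypto(uint16_t port_id,
		struct rte_security_session_conf *conf,
		struct rte_mempool *sess_mp, struct rte_mempool *priv_mp,
		struct rte_mbuf *m, uint64_t sqn)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
	struct rte_security_session *sess;

	if (ctx == NULL)
		return -ENOTSUP;

	sess = rte_security_session_create(ctx, conf, sess_mp, priv_mp);
	if (sess == NULL)
		return -ENOMEM;

	m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	/* Fills the iavf_ipsec_crypto_pkt_metadata dynfield; sqn carries the
	 * 64-bit extended sequence number when ESN is enabled.
	 */
	rte_security_set_pkt_metadata(ctx, sess, m, &sqn);

	return rte_eth_tx_burst(port_id, 0, &m, 1) == 1 ? 0 : -EIO;
}

In a real application the session would be created once when the SA is installed rather
than per packet; it is inlined here only to keep the sketch self-contained.
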
diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
new file mode 100644
index 0000000000..70ce8dd638
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC MAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 },
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* ChaCha20-Poly1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 16,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	}
+};
+
+
+#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
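The static table above lists the crypto algorithms the driver can expose; the security
capabilities actually advertised are assembled at runtime from the PF's virtchnl response
(see iavf_ipsec_crypto_set_security_capabililites() earlier in this patch), so an
application should discover support through the security API rather than assume it. A
short sketch, with the port id as an assumption:

#include <stdbool.h>
#include <rte_ethdev.h>
#include <rte_security.h>

/* Sketch: walk the advertised security capabilities of a port. */
static bool
port_has_inline_esp_tunnel_egress(uint16_t port_id)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
	const struct rte_security_capability *cap;

	if (ctx == NULL)
		return false;

	for (cap = rte_security_capabilities_get(ctx);
	     cap != NULL && cap->action != RTE_SECURITY_ACTION_TYPE_NONE;
	     cap++) {
		if (cap->action == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
		    cap->protocol == RTE_SECURITY_PROTOCOL_IPSEC &&
		    cap->ipsec.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
		    cap->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
			return true;
	}
	return false;
}
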
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index d2cb6d59bc..3f8c0822b7 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -27,6 +27,7 @@
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
+#include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
@@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 uint8_t
 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
@@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
 		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
 		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
 		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
+				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
 	};
 
 	return flex_type < RTE_DIM(rxdid_map) ?
@@ -504,6 +508,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields =
 			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
+	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+		rxq->xtr_ol_flag =
+			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+		rxq->rxd_to_pkt_fields =
+			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
 	case IAVF_RXDID_COMMS_OVS_1:
 		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
 		break;
@@ -688,6 +698,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		       const struct rte_eth_txconf *tx_conf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
@@ -732,9 +744,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		struct virtchnl_vlan_supported_caps *insertion_support =
-			&vf->vlan_v2_caps.offloads.insertion_support;
+			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
 		uint32_t insertion_cap;
 
 		if (insertion_support->outer)
@@ -758,6 +770,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
+	if (iavf_ipsec_crypto_supported(adapter))
+		txq->ipsec_crypto_pkt_md_offset =
+			iavf_security_get_pkt_md_offset(adapter);
+
 	/* Allocate software ring */
 	txq->sw_ring =
 		rte_zmalloc_socket("iavf tx sw ring",
@@ -1075,6 +1091,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
 #endif
 }
 
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
+
+	mb->dynfield1[0] = desc->ipsec_said &
+			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
+	}
+
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp,
+			  struct iavf_ipsec_crypto_stats *stats)
+{
+	uint16_t status1 = rte_le_to_cpu_64(rxdp->wb.status_error1);
+
+	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
+		uint16_t ipsec_status;
+
+		mb->ol_flags |= PKT_RX_SEC_OFFLOAD;
+
+		ipsec_status = status1 &
+			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
+
+		if (unlikely(ipsec_status !=
+			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
+			mb->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+
+			switch (ipsec_status) {
+			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
+				stats->ierrors.sad_miss++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
+				stats->ierrors.not_processed++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
+				stats->ierrors.icv_check++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
+				stats->ierrors.ipsec_length++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
+				stats->ierrors.misc++;
+				break;
+			}
+
+			stats->ierrors.count++;
+			return;
+		}
+
+		stats->icount++;
+		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
+
+		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
+			ipsec_status !=
+				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
+			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
+	}
+}
+
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
@@ -1393,6 +1473,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1535,6 +1617,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1773,6 +1857,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+				&rxq->stats.ipsec_crypto);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2085,6 +2171,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
 	*field |= cmd;
 }
 
+static inline void
+iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+{
+	uint64_t ipsec_field =
+		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+
+	*field |= ipsec_field;
+}
+
+
 static inline void
 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 		const struct rte_mbuf *m)
@@ -2117,15 +2215,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 
 static inline uint16_t
 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
-	struct rte_mbuf *m)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
 	uint64_t segmentation_field = 0;
 	uint64_t total_length = 0;
 
-	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD) {
+		total_length = ipsec_md->l4_payload_len;
+	} else {
+		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
-	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
-		total_length -= m->outer_l3_len;
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			total_length -= m->outer_l3_len;
+	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
 	if (!m->l4_len || !m->tso_segsz)
@@ -2148,7 +2250,8 @@ iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
 
 static inline void
 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
-	struct rte_mbuf *m, uint16_t *tlen)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
+	uint16_t *tlen)
 {
 	/* fill descriptor type field */
 	desc->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
@@ -2158,8 +2261,12 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 
 	/* fill segmentation field */
 	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		/* fill IPsec field */
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			iavf_fill_ctx_desc_ipsec_field(&desc->qw1, ipsec_md);
+
 		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc->qw1,
-				m);
+				m, ipsec_md);
 	}
 
 	/* fill tunnelling field */
@@ -2173,6 +2280,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 }
 
 
+static inline void
+iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
+	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
+{
+	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
+		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
+		((uint64_t)md->esp_trailer_len <<
+				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
+
+	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
+		((uint64_t)md->next_proto <<
+				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
+		((uint64_t)(md->len_iv & 0x3) <<
+				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
+		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+				1ULL : 0ULL) <<
+				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
+		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+
+	/**
+	 * TODO: pre-calculate this during session initialization.
+	 *
+	 * Calculate the IPsec header length required by the data descriptor
+	 * when TSO offload is enabled.
+	 */
+	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
+			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+			sizeof(struct rte_udp_hdr) : 0);
+}
+
 static inline void
 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		struct rte_mbuf *m)
@@ -2286,6 +2425,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
 }
 
 
+static struct iavf_ipsec_crypto_pkt_metadata *
+iavf_ipsec_crypto_get_pkt_metdata(const struct iavf_tx_queue *txq,
+		struct rte_mbuf *m)
+{
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
+				struct iavf_ipsec_crypto_pkt_metadata *);
+
+	return NULL;
+}
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -2314,7 +2464,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	for (idx = 0; idx < nb_pkts; idx++) {
 		volatile struct iavf_tx_desc *ddesc;
-		uint16_t nb_desc_ctx;
+		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
+
+		uint16_t nb_desc_ctx, nb_desc_ipsec;
 		uint16_t nb_desc_data, nb_desc_required;
 		uint16_t tlen = 0, ipseclen = 0;
 		uint64_t ddesc_template = 0;
@@ -2324,16 +2476,23 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+		/**
+		 * Get metadata for ipsec crypto from mbuf dynamic fields if
+		 * security offload is specified.
+		 */
+		ipsec_md = iavf_ipsec_crypto_get_pkt_metdata(txq, mb);
+
 		nb_desc_data = mb->nb_segs;
 		nb_desc_ctx = !!(mb->ol_flags &
 			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
+		nb_desc_ipsec = !!(mb->ol_flags & PKT_TX_SEC_OFFLOAD);
 
 		/**
 		 * The number of descriptors that must be allocated for
 		 * a packet equals the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx;
+		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2384,7 +2543,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txe->mbuf = NULL;
 			}
 
-			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
 			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
 			txe->last_id = desc_idx_last;
@@ -2392,7 +2551,27 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe = txn;
 			}
 
+		if (nb_desc_ipsec) {
+			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
+				(volatile struct iavf_tx_ipsec_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
+			if (txe->mbuf) {
+				rte_pktmbuf_free_seg(txe->mbuf);
+				txe->mbuf = NULL;
+			}
+
+			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
+		}
 
 		mb_seg = mb;
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 1bc47614ea..e009387aff 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -25,7 +25,8 @@
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		DEV_TX_OFFLOAD_TCP_TSO |		 \
+		DEV_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
 		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
@@ -47,7 +48,7 @@
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          88
+#define IAVF_MIN_TSO_MSS          256
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
@@ -65,7 +66,8 @@
 		PKT_TX_VLAN_PKT |		 \
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
-		PKT_TX_TCP_SEG)
+		PKT_TX_TCP_SEG |		 \
+		PKT_TX_SEC_OFFLOAD)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
@@ -163,6 +165,24 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_rx_queue_stats {
+	uint64_t reserved;
+	struct iavf_ipsec_crypto_stats ipsec_crypto;
+};
+
 /* Structure associated with each Rx queue. */
 struct iavf_rx_queue {
 	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
@@ -211,6 +231,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 				/* handle flexible descriptor by RXDID */
+	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
 };
 
@@ -245,6 +266,7 @@ struct iavf_tx_queue {
 	uint64_t offloads;
 	uint16_t next_dd;              /* next to set RS, for VPMD */
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
+	uint16_t ipsec_crypto_pkt_md_offset;
 
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
@@ -347,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
 	} flex_ts;
 };
 
+/* Rx Flex Descriptor
+ * RxDID Profile ID 24 Inline IPsec
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: Inline IPsec SAID lower 16-bits
+ * Flex-field 5: Inline IPsec SAID upper 16-bits
+ */
+struct iavf_32b_rx_flex_desc_comms_ipsec {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	__le32 ipsec_said;
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -366,6 +422,7 @@ enum iavf_rxdid {
 	IAVF_RXDID_COMMS_AUX_TCP	= 21,
 	IAVF_RXDID_COMMS_OVS_1		= 22,
 	IAVF_RXDID_COMMS_OVS_2		= 23,
+	IAVF_RXDID_COMMS_IPSEC_CRYPTO	= 24,
 	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
 	IAVF_RXDID_LAST			= 63,
 };
@@ -393,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
 
 enum iavf_rx_flex_desc_status_error_1_bits {
 	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
 	/* [10:6] reserved */
 	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
 	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
@@ -405,6 +466,24 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
+
+enum iavf_rx_flex_desc_ipsec_crypto_status {
+	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
+	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
+	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
+	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
+	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
+	/* Reserved */
+	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
+};
+
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)
+
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
@@ -565,6 +644,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	case IAVF_TX_DESC_DTYPE_CONTEXT:
 		name = "Tx_context_desc";
 		break;
+	case IAVF_TX_DESC_DTYPE_IPSEC:
+		name = "Tx_IPsec_desc";
+		break;
 	default:
 		name = "unknown_desc";
 		break;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 5c62443999..d99b03c8b2 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1767,3 +1767,32 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	return 0;
 }
 
+
+
+int
+iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
+	args.in_args = msg;
+	args.in_args_size = msg_len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 1);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+				"OP_INLINE_IPSEC_CRYPTO");
+		return err;
+	}
+
+	memcpy(resp_msg, args.out_buffer, resp_msg_len);
+
+	return 0;
+}
+
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index f2010a8337..385770b043 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -10,7 +10,7 @@ endif
 cflags += ['-Wno-strict-aliasing']
 
 includes += include_directories('../../common/iavf')
-deps += ['common_iavf']
+deps += ['common_iavf', 'security', 'cryptodev']
 
 sources = files(
         'iavf_ethdev.c',
@@ -20,6 +20,7 @@ sources = files(
         'iavf_fdir.c',
         'iavf_hash.c',
         'iavf_tm.c',
+        'iavf_ipsec_crypto.c',
 )
 
 if arch_subdir == 'x86'
diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
index 3a045040f1..7426eb9be3 100644
--- a/drivers/net/iavf/rte_pmd_iavf.h
+++ b/drivers/net/iavf/rte_pmd_iavf.h
@@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 /**
  * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map
index f3efe756cf..97f0f87311 100644
--- a/drivers/net/iavf/version.map
+++ b/drivers/net/iavf/version.map
@@ -13,4 +13,7 @@ EXPERIMENTAL {
 	rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+	# added in 21.11
+	rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v5 5/6] net/iavf: add xstats support for inline IPsec crypto
  2021-10-06  9:28 ` [dpdk-dev] [PATCH v5 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (3 preceding siblings ...)
  2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-06  9:28   ` Radu Nicolau
  2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-06  9:28 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add per-queue counters for maintaining statistics for inline IPsec
crypto offload. The counters can be retrieved through
rte_security_session_stats_get(), with more detailed error counters
exposed through the rte_ethdev xstats API.
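
For context only (not part of the patch), a minimal sketch of how an
application could read the new counters through the generic xstats API;
the counter names come from rte_iavf_stats_strings below, and port_id is
assumed to refer to an already started iAVF port:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Print all inline IPsec crypto xstats of a started port. */
static void
print_inline_ipsec_xstats(uint16_t port_id)
{
	int i, nb = rte_eth_xstats_get_names(port_id, NULL, 0);
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *values;

	if (nb <= 0)
		return;
	names = calloc(nb, sizeof(*names));
	values = calloc(nb, sizeof(*values));
	if (names == NULL || values == NULL)
		goto out;
	if (rte_eth_xstats_get_names(port_id, names, nb) != nb ||
	    rte_eth_xstats_get(port_id, values, nb) != nb)
		goto out;
	for (i = 0; i < nb; i++)
		if (strstr(names[values[i].id].name, "inline_ipsec_crypto"))
			printf("%s: %" PRIu64 "\n",
			       names[values[i].id].name, values[i].value);
out:
	free(names);
	free(values);
}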

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h        | 21 ++++++++-
 drivers/net/iavf/iavf_ethdev.c | 84 ++++++++++++++++++++++++++++------
 drivers/net/iavf/iavf_rxtx.h   | 12 -----
 3 files changed, 89 insertions(+), 28 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 934ef48278..d5f574b4b3 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -92,6 +92,25 @@ struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
 
+
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_eth_xstats {
+	struct virtchnl_eth_stats eth_stats;
+	struct iavf_ipsec_crypto_stats ips_stats;
+};
+
 /* Structure that defines a VSI, associated with a adapter. */
 struct iavf_vsi {
 	struct iavf_adapter *adapter; /* Backreference to associated adapter */
@@ -101,7 +120,7 @@ struct iavf_vsi {
 	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
 	uint16_t base_vector;
 	uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
-	struct virtchnl_eth_stats eth_stats_offset;
+	struct iavf_eth_xstats eth_stats_offset;
 };
 
 struct rte_flow;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 294be1a022..aad6a28585 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -89,6 +89,7 @@ static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);
 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n);
 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -144,21 +145,37 @@ struct rte_iavf_xstats_name_off {
 	unsigned int offset;
 };
 
+#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
-	{"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
-	{"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
-	{"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
-	{"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
-	{"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
+	{"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
+	{"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
+	{"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
+	{"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
+	{"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
 	{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
 		rx_unknown_protocol)},
-	{"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
-	{"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
-	{"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
-	{"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
-	{"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
-	{"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
+	{"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
+	{"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
+	{"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
+	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
+	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
+	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+
+	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
+	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
+	{"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
+	{"inline_ipsec_crypto_ierrors_sad_lookup",
+			_OFF_OF(ips_stats.ierrors.sad_miss)},
+	{"inline_ipsec_crypto_ierrors_not_processed",
+			_OFF_OF(ips_stats.ierrors.not_processed)},
+	{"inline_ipsec_crypto_ierrors_icv_fail",
+			_OFF_OF(ips_stats.ierrors.icv_check)},
+	{"inline_ipsec_crypto_ierrors_length",
+			_OFF_OF(ips_stats.ierrors.ipsec_length)},
+	{"inline_ipsec_crypto_ierrors_misc",
+			_OFF_OF(ips_stats.ierrors.misc)},
 };
+#undef _OFF_OF
 
 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
 		sizeof(rte_iavf_stats_strings[0]))
@@ -176,7 +193,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.stats_reset                = iavf_dev_stats_reset,
 	.xstats_get                 = iavf_dev_xstats_get,
 	.xstats_get_names           = iavf_dev_xstats_get_names,
-	.xstats_reset               = iavf_dev_stats_reset,
+	.xstats_reset               = iavf_dev_xstats_reset,
 	.promiscuous_enable         = iavf_dev_promiscuous_enable,
 	.promiscuous_disable        = iavf_dev_promiscuous_disable,
 	.allmulticast_enable        = iavf_dev_allmulticast_enable,
@@ -1543,7 +1560,7 @@ iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
 static void
 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
 {
-	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
 
 	iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
 	iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
@@ -1605,7 +1622,18 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev)
 		return ret;
 
 	/* set stats offset base on current values */
-	vsi->eth_stats_offset = *pstats;
+	vsi->eth_stats_offset.eth_stats = *pstats;
+
+	return 0;
+}
+
+static int
+iavf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_dev_stats_reset(dev);
+	memset(&vf->vsi.eth_stats_offset, 0, sizeof(struct iavf_eth_xstats));
 
 	return 0;
 }
@@ -1625,6 +1653,27 @@ static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return IAVF_NB_XSTATS;
 }
 
+static void
+iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
+		struct iavf_ipsec_crypto_stats *ips)
+{
+	uint16_t idx;
+	for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
+		struct iavf_rx_queue *rxq;
+		struct iavf_ipsec_crypto_stats *stats;
+		rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
+		stats = &rxq->stats.ipsec_crypto;
+		ips->icount += stats->icount;
+		ips->ibytes += stats->ibytes;
+		ips->ierrors.count += stats->ierrors.count;
+		ips->ierrors.sad_miss += stats->ierrors.sad_miss;
+		ips->ierrors.not_processed += stats->ierrors.not_processed;
+		ips->ierrors.icv_check += stats->ierrors.icv_check;
+		ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
+		ips->ierrors.misc += stats->ierrors.misc;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1635,6 +1684,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_vsi *vsi = &vf->vsi;
 	struct virtchnl_eth_stats *pstats = NULL;
+	struct iavf_eth_xstats iavf_xtats = {0};
 
 	if (n < IAVF_NB_XSTATS)
 		return IAVF_NB_XSTATS;
@@ -1647,11 +1697,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 		return 0;
 
 	iavf_update_stats(vsi, pstats);
+	iavf_xtats.eth_stats = *pstats;
+
+	if (iavf_ipsec_crypto_supported(adapter))
+		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
-		xstats[i].value = *(uint64_t *)(((char *)pstats) +
+		xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
 			rte_iavf_stats_strings[i].offset);
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index e009387aff..18bf8f4921 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -165,18 +165,6 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
-struct iavf_ipsec_crypto_stats {
-	uint64_t icount;
-	uint64_t ibytes;
-	struct {
-		uint64_t count;
-		uint64_t sad_miss;
-		uint64_t not_processed;
-		uint64_t icv_check;
-		uint64_t ipsec_length;
-		uint64_t misc;
-	} ierrors;
-};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v5 6/6] net/iavf: add watchdog for VFLR
  2021-10-06  9:28 ` [dpdk-dev] [PATCH v5 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (4 preceding siblings ...)
  2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-10-06  9:28   ` Radu Nicolau
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-06  9:28 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add a watchdog to the iAVF PMD which monitors the VFLR register.
If the device is not already in reset and a VF reset in progress is
detected, notify the user through a callback and enter the reset
state. If the device is already in reset, poll for completion of the
reset.
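
For reference (not part of this patch), a minimal sketch of how an
application consumes the reset notification that the watchdog raises via
rte_eth_dev_callback_process(); the handler body is illustrative:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* Called from the interrupt/alarm thread when the PMD reports
 * RTE_ETH_EVENT_INTR_RESET, i.e. when the watchdog sees a VFLR.
 */
static int
vf_reset_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		  void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	printf("port %u: VF reset detected, quiescing datapath\n", port_id);
	/* Recovery (e.g. rte_eth_dev_reset()) is normally deferred to a
	 * non-interrupt context.
	 */
	return 0;
}

/* register once after the port is probed */
static void
register_vf_reset_cb(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
				      vf_reset_event_cb, NULL);
}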

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h        |  5 ++
 drivers/net/iavf/iavf_ethdev.c | 93 ++++++++++++++++++++++++++++++++++
 2 files changed, 98 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index d5f574b4b3..cc03985127 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -29,6 +29,8 @@
 
 #define IAVF_NUM_MACADDR_MAX      64
 
+#define IAVF_DEV_WATCHDOG_PERIOD      5000
+
 #define IAVF_DEFAULT_RX_PTHRESH      8
 #define IAVF_DEFAULT_RX_HTHRESH      8
 #define IAVF_DEFAULT_RX_WTHRESH      0
@@ -212,6 +214,9 @@ struct iavf_info {
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
+	/** iAVF watchdog enable */
+	bool watchdog_enabled;
+
 	/* Event from pf */
 	bool dev_closed;
 	bool link_up;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index aad6a28585..43b64fb6db 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -24,6 +24,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_alarm.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
@@ -239,6 +240,90 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+
+static int
+iavf_vfr_inprogress(struct iavf_hw *hw)
+{
+	int inprogress = 0;
+
+	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
+		VIRTCHNL_VFR_INPROGRESS)
+		inprogress = 1;
+
+	if (inprogress)
+		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
+
+	return inprogress;
+}
+
+static void
+iavf_dev_watchdog(void *cb_arg)
+{
+	struct iavf_adapter *adapter = cb_arg;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	int vfr_inprogress = 0, rc = 0;
+
+	/* check if watchdog has been disabled since last call */
+	if (!adapter->vf.watchdog_enabled)
+		return;
+
+	/* If in reset then poll vfr_inprogress register for completion */
+	if (adapter->vf.vf_reset) {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (!vfr_inprogress) {
+			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+				adapter->eth_dev->data->name);
+			adapter->vf.vf_reset = false;
+		}
+	/* If not in reset then poll vfr_inprogress register for VFLR event */
+	} else {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (vfr_inprogress) {
+			PMD_DRV_LOG(INFO,
+				"VF \"%s\" reset event detected by watchdog",
+				adapter->eth_dev->data->name);
+
+			/* enter reset state with VFLR event */
+			adapter->vf.vf_reset = true;
+
+			rte_eth_dev_callback_process(adapter->eth_dev,
+				RTE_ETH_EVENT_INTR_RESET, NULL);
+		}
+	}
+
+	/* re-alarm watchdog */
+	rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, cb_arg);
+
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed \"%s\" to reset device watchdog alarm",
+			adapter->eth_dev->data->name);
+}
+
+static void
+iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Enabling device watchdog");
+	adapter->vf.watchdog_enabled = true;
+	if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, (void *)adapter))
+		PMD_DRV_LOG(ERR, "Failed to enable device watchdog");
+#endif
+}
+
+static void
+iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Disabling device watchdog");
+	adapter->vf.watchdog_enabled = false;
+#endif
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
@@ -2448,6 +2533,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	iavf_default_rss_disable(adapter);
 
+
+	/* Start device watchdog */
+	iavf_dev_watchdog_enable(adapter);
+
+
 	return 0;
 
 flow_init_err:
@@ -2527,6 +2617,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
 		vf->vf_reset = false;
 
+	/* disable watchdog */
+	iavf_dev_watchdog_disable(adapter);
+
 	return ret;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v4 6/6] net/iavf: add watchdog for VFLR
  2021-10-04 11:18       ` Nicolau, Radu
  2021-10-04 14:21         ` Nicolau, Radu
@ 2021-10-08  6:19         ` Wu, Jingjing
  2021-10-08 10:09           ` Nicolau, Radu
  1 sibling, 1 reply; 128+ messages in thread
From: Wu, Jingjing @ 2021-10-08  6:19 UTC (permalink / raw)
  To: Nicolau, Radu, Xing, Beilei
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Zhang, Qi Z, Richardson,
	Bruce, Ananyev, Konstantin

> > Besides checking VFGEN_RSTAT, there is a process to handle
> VIRTCHNL_OP_EVENT  from PF. What is the change for? Any scenario which
> VIRTCHNL_OP_EVENT  doesn't cover?
> > And how is the 500us been determined?
> 
> Hi Jingjing, thanks for reviewing, I think this can be handled with the
> VIRTCHNL_OP_EVENT  with no need for a watchdog alarm, I will rework the
> patch.
> 
Hi, Radu, I saw the patch is reworked, but it looks like the watchdog is still
there. So what is the scenario that VIRTCHNL_OP_EVENT doesn't cover?


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v4 6/6] net/iavf: add watchdog for VFLR
  2021-10-08  6:19         ` Wu, Jingjing
@ 2021-10-08 10:09           ` Nicolau, Radu
  0 siblings, 0 replies; 128+ messages in thread
From: Nicolau, Radu @ 2021-10-08 10:09 UTC (permalink / raw)
  To: Wu, Jingjing, Xing, Beilei
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Zhang, Qi Z, Richardson,
	Bruce, Ananyev, Konstantin


On 10/8/2021 7:19 AM, Wu, Jingjing wrote:
>>> Besides checking VFGEN_RSTAT, there is a process to handle
>> VIRTCHNL_OP_EVENT  from PF. What is the change for? Any scenario which
>> VIRTCHNL_OP_EVENT  doesn't cover?
>>> And how is the 500us been determined?
>> Hi Jingjing, thanks for reviewing, I think this can be handled with the
>> VIRTCHNL_OP_EVENT  with no need for a watchdog alarm, I will rework the
>> patch.
>>
> Hi, Radu, I saw the patch is reworked, but looks like watchdog is still there. So what is the scenario
> VIRTCHNL_OP_EVENT  doesn't cover?

Hi Jingjing, I went over this with Declan; the reason it was added is
that we can actually have a hardware-initiated reset that may not
trigger an event, and the kernel driver also implements a similar
mechanism.

Would it be more acceptable to have it disabled by default?



^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v6 0/6] iavf: add iAVF IPsec inline crypto support
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (7 preceding siblings ...)
  2021-10-06  9:28 ` [dpdk-dev] [PATCH v5 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-08 10:19 ` Radu Nicolau
  2021-10-08 10:19   ` [dpdk-dev] [PATCH v6 1/6] common/iavf: " Radu Nicolau
                     ` (5 more replies)
  2021-10-13 15:33 ` [dpdk-dev] [PATCH v7 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (7 subsequent siblings)
  16 siblings, 6 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-08 10:19 UTC (permalink / raw)
  Cc: dev, declan.doherty, abhijit.sinha, jingjing.wu, qi.z.zhang,
	beilei.xing, bruce.richardson, konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as support for the offload of
ESP over UDP, in conjunction with TSO for UDP and TCP flows.

Depends on series "new features for ipsec and security libraries"
https://patchwork.dpdk.org/project/dpdk/list/?series=19320
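
For context (not code from this series), a rough sketch of how an
application is expected to create an outbound inline crypto ESP session
against the iAVF security context is shown below; the exact
rte_security_session_create() prototype and the xform fields shown
depend on the DPDK release, and the key, SPI and session mempools are
placeholders:

#include <rte_ethdev.h>
#include <rte_security.h>
#include <rte_crypto_sym.h>

static uint8_t aes_gcm_key[16]; /* placeholder key material */

static struct rte_security_session *
create_outb_esp_session(uint16_t port_id, struct rte_mempool *sess_mp,
			struct rte_mempool *sess_priv_mp)
{
	struct rte_security_ctx *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
	struct rte_crypto_sym_xform aead = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = aes_gcm_key,
				 .length = sizeof(aes_gcm_key) },
			.iv = { .offset = 0, .length = 12 },
			.digest_length = 16,
			.aad_length = 8,
		},
	};
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1000, /* placeholder SPI */
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			/* tunnel endpoints, options, ESN etc. omitted */
		},
		.crypto_xform = &aead,
	};

	if (sec_ctx == NULL)
		return NULL;
	/* 21.11-era prototype; later releases take a single mempool */
	return rte_security_session_create(sec_ctx, &conf,
					   sess_mp, sess_priv_mp);
}

Packets are then bound to the SA before transmission with
rte_security_set_pkt_metadata() and the PKT_TX_SEC_OFFLOAD flag, which
is what the reworked TX path keys off.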


Radu Nicolau (6):
  common/iavf: add iAVF IPsec inline crypto support
  net/iavf: rework tx path
  net/iavf: add support for asynchronous virt channel messages
  net/iavf: add iAVF IPsec inline crypto support
  net/iavf: add xstats support for inline IPsec crypto
  net/iavf: add watchdog for VFLR

 drivers/common/iavf/iavf_type.h               |    1 +
 drivers/common/iavf/virtchnl.h                |   17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
 drivers/net/iavf/iavf.h                       |   52 +-
 drivers/net/iavf/iavf_ethdev.c                |  218 +-
 drivers/net/iavf/iavf_generic_flow.c          |   16 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1904 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  713 ++++--
 drivers/net/iavf/iavf_rxtx.h                  |  198 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  168 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 17 files changed, 4091 insertions(+), 311 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

-- 

v2: small updates and fixes in the flow-related section
v3: split the huge patch and address feedback
v4: small changes due to dependency changes
v5: updated the watchdog patch
v6: rebased and updated the common section

2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v6 1/6] common/iavf: add iAVF IPsec inline crypto support
  2021-10-08 10:19 ` [dpdk-dev] [PATCH v6 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-08 10:19   ` Radu Nicolau
  2021-10-08 10:20   ` [dpdk-dev] [PATCH v6 2/6] net/iavf: rework tx path Radu Nicolau
                     ` (4 subsequent siblings)
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-08 10:19 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec.
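
As an illustration (not code from this patch; the real users live in
iavf_ipsec_crypto.c later in the series), a hypothetical helper showing
how the VF side can compose an SA create request with the structures
added below and send it with the iavf_ipsec_crypto_request() virtchnl
helper; the response layout is an assumption based on the sa_cfg_resp
member:

/* assumes iavf.h, virtchnl.h/virtchnl_inline_ipsec.h and string.h */
static int
vf_send_sa_create(struct iavf_adapter *adapter,
		  const struct virtchnl_ipsec_sa_cfg *sa_cfg,
		  u32 *sa_handle)
{
	/* same size as virtchnl_inline_ipsec_val_msg_len() returns
	 * for INLINE_IPSEC_OP_SA_CREATE
	 */
	u8 buf[sizeof(struct inline_ipsec_msg) +
	       sizeof(struct virtchnl_ipsec_sa_cfg)];
	struct inline_ipsec_msg *msg = (struct inline_ipsec_msg *)buf;
	int ret;

	memset(buf, 0, sizeof(buf));
	msg->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
	msg->req_id = 1; /* any id other than VIRTCHNL_IPSEC_INVALID_REQ_ID */
	msg->ipsec_data.sa_cfg[0] = *sa_cfg;

	/* request and response share the same buffer here */
	ret = iavf_ipsec_crypto_request(adapter, buf, sizeof(buf),
					buf, sizeof(buf));
	if (ret != 0)
		return ret;

	/* the PF answers with the handle of the newly created SA */
	*sa_handle = msg->ipsec_data.sa_cfg_resp[0].sa_handle;
	return 0;
}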

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/common/iavf/iavf_type.h             |   1 +
 drivers/common/iavf/virtchnl.h              |  17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h | 553 ++++++++++++++++++++
 3 files changed, 569 insertions(+), 2 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 73dfb47e70..51267ca3b3 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -723,6 +723,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 067f715945..269578f7c0 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -38,6 +38,8 @@
  * value in current and future projects
  */
 
+#include "virtchnl_inline_ipsec.h"
+
 /* Error Codes */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
@@ -133,7 +135,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, and 37 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
 	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
 	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
@@ -225,6 +228,8 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 		return "VIRTCHNL_OP_DCF_CMD_DESC";
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
@@ -385,7 +390,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
-	/* BIT(8) is reserved */
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
@@ -2291,6 +2296,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..1e9134501e
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all valid, if certain field is invalid, set 1 for all bits */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* vf record the capability of crypto from the virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF pass virtchnl_ipsec_cap to PF
+ * and PF return capability of ipsec from virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF send this SA configuration to PF using virtchnl;
+ * PF creates the SA as configured and the PF driver will return
+ * a unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logically separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF send configuration of index of SA to PF
+ * PF will update SA according to configuration
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF send configuration of index of SA to PF
+ * PF will destroy SA according to configuration
+ * flag bitmap indicate all SA or just selected SA will
+ * be destroyed
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF send this SA configuration to PF using virtchnl;
+ * PF read SA and will return configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logically separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v6 2/6] net/iavf: rework tx path
  2021-10-08 10:19 ` [dpdk-dev] [PATCH v6 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-10-08 10:19   ` [dpdk-dev] [PATCH v6 1/6] common/iavf: " Radu Nicolau
@ 2021-10-08 10:20   ` Radu Nicolau
  2021-10-08 10:20   ` [dpdk-dev] [PATCH v6 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-08 10:20 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Rework the TX path and TX descriptor usage in order to
allow for better use of offload flags and to facilitate enabling of
the inline crypto offload feature.
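
For reference (not part of the patch), the fields the reworked path
consumes are the standard mbuf TX offload fields; a minimal,
illustrative setup of an IPv4/TCP packet for TSO before handing it to
the PMD looks like this (the MSS value is a placeholder):

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

/* m must already hold a complete Ethernet/IPv4/TCP frame */
static uint16_t
tx_one_tso_packet(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);
	m->tso_segsz = 1448; /* MSS, placeholder */
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;

	/* the driver builds the context descriptor from these fields */
	return rte_eth_tx_burst(port_id, queue_id, &m, 1);
}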

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c         | 539 ++++++++++++++++-----------
 drivers/net/iavf/iavf_rxtx.h         | 117 +++++-
 drivers/net/iavf/iavf_rxtx_vec_sse.c |  10 +-
 3 files changed, 433 insertions(+), 233 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 87afc0b4cb..aab11720df 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1054,27 +1054,31 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 
 static inline void
 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
-			  volatile union iavf_rx_flex_desc *rxdp,
-			  uint8_t rx_flags)
+			  volatile union iavf_rx_flex_desc *rxdp)
 {
-	uint16_t vlan_tci = 0;
-
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
-	    rte_le_to_cpu_64(rxdp->wb.status_error0) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+		(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+		mb->vlan_tci =
+			rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	} else {
+		mb->vlan_tci = 0;
+	}
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
-	    rte_le_to_cpu_16(rxdp->wb.status_error1) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
-#endif
-
-	if (vlan_tci) {
-		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		mb->vlan_tci = vlan_tci;
+	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
+		mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+				PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+		mb->vlan_tci_outer = mb->vlan_tci;
+		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
+	} else {
+		mb->vlan_tci_outer = 0;
 	}
+#endif
 }
 
 /* Translate the rx descriptor status and error fields to pkt flags */
@@ -1394,7 +1398,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->ol_flags = 0;
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1536,7 +1540,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = 0;
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1774,7 +1778,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
-			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
+			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2068,190 +2072,305 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 	return 0;
 }
 
-/* Check if the context descriptor is needed for TX offloading */
+
+
+static inline void
+iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
+{
+	uint64_t cmd = 0;
+
+	/* TSO enabled */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;
+
+	/* Time Sync - Currently not supported */
+
+	/* Outer L2 TAG 2 Insertion - Currently not supported */
+	/* Inner L2 TAG 2 Insertion - Currently not supported */
+
+	*field |= cmd;
+}
+
+static inline void
+iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
+		const struct rte_mbuf *m)
+{
+	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
+	uint64_t eip_len = 0;
+	uint64_t eip_noinc = 0;
+	/* Default - IP_ID is increment in each segment of LSO */
+
+	switch (m->ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6 |
+			PKT_TX_OUTER_IP_CKSUM)) {
+	case PKT_TX_OUTER_IPV4:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV6:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	}
+
+	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
+		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
+		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
+}
+
 static inline uint16_t
-iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
+iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
+	struct rte_mbuf *m)
 {
-	if (flags & PKT_TX_TCP_SEG)
-		return 1;
-	if (flags & PKT_TX_VLAN_PKT &&
-	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		return 1;
-	return 0;
+	uint64_t segmentation_field = 0;
+	uint64_t total_length = 0;
+
+	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		total_length -= m->outer_l3_len;
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+	if (!m->l4_len || !m->tso_segsz)
+		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
+			 m->l4_len, m->tso_segsz);
+	if (m->tso_segsz < 88)
+		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
+			m->tso_segsz, 88);
+#endif
+	segmentation_field =
+		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
+				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
+		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
+				IAVF_TXD_CTX_QW1_MSS_MASK);
+
+	*field |= segmentation_field;
+
+	return total_length;
 }
 
+
+struct iavf_tx_context_desc_qws {
+	__le64 qw0;
+	__le64 qw1;
+};
+
 static inline void
-iavf_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union iavf_tx_offload tx_offload)
+iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
+	struct rte_mbuf *m, uint16_t *tlen)
 {
+	volatile struct iavf_tx_context_desc_qws *desc_qws =
+			(volatile struct iavf_tx_context_desc_qws *) desc;
+	/* fill descriptor type field */
+	desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
+
+	/* fill command field */
+	iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m);
+
+	/* fill segmentation field */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
+				m);
+	}
+
+	/* fill tunnelling field */
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
+	else
+		desc_qws->qw0 = 0;
+
+	desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
+	desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);
+}
+
+
+static inline void
+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
+		struct rte_mbuf *m)
+{
+	uint64_t command = 0;
+	uint64_t offset = 0;
+	uint64_t l2tag1 = 0;
+
+	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
+
+	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
+
+	/* Descriptor based VLAN insertion */
+	if (m->ol_flags & PKT_TX_VLAN_PKT) {
+		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
+		l2tag1 |= m->vlan_tci;
+	}
+
 	/* Set MACLEN */
-	*td_offset |= (tx_offload.l2_len >> 1) <<
-		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & PKT_TX_IP_CKSUM) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV4) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV6) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	}
-
-	if (ol_flags & PKT_TX_TCP_SEG) {
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
+	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L3 checksum offloading inner */
+	if (m->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV4) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV6) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	}
+
+	if (m->ol_flags & PKT_TX_TCP_SEG) {
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (m->l4_len >> 2) <<
 			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		return;
 	}
 
 	/* Enable L4 checksum offloads */
-	switch (ol_flags & PKT_TX_L4_MASK) {
+	switch (m->ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_TCP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_SCTP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_UDP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	}
+
+	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
+		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
+		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
+		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
+		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
-/* set TSO context descriptor
- * support IP -> L4 and IP -> IP -> L4
- */
-static inline uint64_t
-iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
+static inline void
+iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t value)
 {
-	uint64_t ctx_desc = 0;
-	uint32_t cd_cmd, hdr_len, cd_tso_len;
-
-	if (!tx_offload.l4_len) {
-		PMD_TX_LOG(DEBUG, "L4 length set to 0");
-		return ctx_desc;
+	*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 	}
 
-	hdr_len = tx_offload.l2_len +
-		  tx_offload.l3_len +
-		  tx_offload.l4_len;
+static inline void
+iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
+	struct rte_mbuf *m, uint64_t desc_template,
+	uint16_t tlen, uint16_t ipseclen)
+{
+	uint32_t hdrlen = m->l2_len;
+	uint32_t bufsz = 0;
+
+	/* fill data descriptor qw1 from template */
+	desc->cmd_type_offset_bsz = desc_template;
+
+	/* set data buffer address */
+	desc->buffer_addr = rte_mbuf_data_iova(m);
+
+	/* calculate data buffer size less set header lengths */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			hdrlen += m->outer_l3_len;
+
+		if (m->ol_flags & PKT_TX_L4_MASK)
+			hdrlen += m->l3_len + m->l4_len;
+		else
+			hdrlen += m->l3_len;
 
-	cd_cmd = IAVF_TX_CTX_DESC_TSO;
-	cd_tso_len = mbuf->pkt_len - hdr_len;
-	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
-		     ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-		     ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			hdrlen += ipseclen;
 
-	return ctx_desc;
+		bufsz = hdrlen + tlen;
+	} else {
+		bufsz = m->data_len;
 }
 
-/* Construct the tx flags */
-static inline uint64_t
-iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
-	       uint32_t td_tag)
-{
-	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
-				((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
-				((uint64_t)td_offset <<
-				 IAVF_TXD_QW1_OFFSET_SHIFT) |
-				((uint64_t)size  <<
-				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
-				((uint64_t)td_tag  <<
-				 IAVF_TXD_QW1_L2TAG1_SHIFT));
+	/* set data buffer size */
+	desc->cmd_type_offset_bsz |=
+		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
+
+	desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
+	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
 }
 
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	volatile struct iavf_tx_desc *txd;
-	volatile struct iavf_tx_desc *txr;
-	struct iavf_tx_queue *txq;
-	struct iavf_tx_entry *sw_ring;
+	struct iavf_tx_queue *txq = tx_queue;
+	volatile struct iavf_tx_desc *txr = txq->tx_ring;
+	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
-	struct rte_mbuf *tx_pkt;
-	struct rte_mbuf *m_seg;
-	uint16_t tx_id;
-	uint16_t nb_tx;
-	uint32_t td_cmd;
-	uint32_t td_offset;
-	uint32_t td_tag;
-	uint64_t ol_flags;
-	uint16_t nb_used;
-	uint16_t nb_ctx;
-	uint16_t tx_last;
-	uint16_t slen;
-	uint64_t buf_dma_addr;
-	uint16_t cd_l2tag2 = 0;
-	union iavf_tx_offload tx_offload = {0};
-
-	txq = tx_queue;
-	sw_ring = txq->sw_ring;
-	txr = txq->tx_ring;
-	tx_id = txq->tx_tail;
-	txe = &sw_ring[tx_id];
+	struct rte_mbuf *mb, *mb_seg;
+	uint16_t desc_idx, desc_idx_last;
+	uint16_t idx;
+
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_free < txq->free_thresh)
-		(void)iavf_xmit_cleanup(txq);
+		iavf_xmit_cleanup(txq);
+
+	desc_idx = txq->tx_tail;
+	txe = &txe_ring[desc_idx];
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+		iavf_dump_tx_entry_ring(txq);
+		iavf_dump_tx_desc_ring(txq);
+#endif
+
 
-	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-		td_cmd = 0;
-		td_tag = 0;
-		td_offset = 0;
+	for (idx = 0; idx < nb_pkts; idx++) {
+		volatile struct iavf_tx_desc *ddesc;
+		uint16_t nb_desc_ctx;
+		uint16_t nb_desc_data, nb_desc_required;
+		uint16_t tlen = 0, ipseclen = 0;
+		uint64_t ddesc_template = 0;
+		uint64_t ddesc_cmd = 0;
+
+		mb = tx_pkts[idx];
 
-		tx_pkt = *tx_pkts++;
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
-		ol_flags = tx_pkt->ol_flags;
-		tx_offload.l2_len = tx_pkt->l2_len;
-		tx_offload.l3_len = tx_pkt->l3_len;
-		tx_offload.l4_len = tx_pkt->l4_len;
-		tx_offload.tso_segsz = tx_pkt->tso_segsz;
-		/* Calculate the number of context descriptors needed. */
-		nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
+		nb_desc_data = mb->nb_segs;
+		nb_desc_ctx = !!(mb->ol_flags &
+			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
 
-		/* The number of descriptors that must be allocated for
+		/**
+		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
-		 * packet plus 1 context descriptor if needed.
+		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
-		tx_last = (uint16_t)(tx_id + nb_used - 1);
+		nb_desc_required = nb_desc_data + nb_desc_ctx;
+
+		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
-		/* Circular ring */
-		if (tx_last >= txq->nb_tx_desc)
-			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+		/* wrap descriptor ring */
+		if (desc_idx_last >= txq->nb_tx_desc)
+			desc_idx_last =
+				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
 
-		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
-			   " tx_first=%u tx_last=%u",
-			   txq->port_id, txq->queue_id, tx_id, tx_last);
+		PMD_TX_LOG(DEBUG,
+			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
+			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
 
-		if (nb_used > txq->nb_free) {
+		if (nb_desc_required > txq->nb_free) {
 			if (iavf_xmit_cleanup(txq)) {
-				if (nb_tx == 0)
+				if (idx == 0)
 					return 0;
 				goto end_of_tx;
 			}
-			if (unlikely(nb_used > txq->rs_thresh)) {
-				while (nb_used > txq->nb_free) {
+			if (unlikely(nb_desc_required > txq->rs_thresh)) {
+				while (nb_desc_required > txq->nb_free) {
 					if (iavf_xmit_cleanup(txq)) {
-						if (nb_tx == 0)
+						if (idx == 0)
 							return 0;
 						goto end_of_tx;
 					}
@@ -2259,122 +2378,94 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 		}
 
-		/* Descriptor based VLAN insertion */
-		if (ol_flags & PKT_TX_VLAN_PKT &&
-		    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
-			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
-			td_tag = tx_pkt->vlan_tci;
-		}
-
-		/* According to datasheet, the bit2 is reserved and must be
-		 * set to 1.
-		 */
-		td_cmd |= 0x04;
-
-		/* Enable checksum offloading */
-		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
-			iavf_txd_enable_checksum(ol_flags, &td_cmd,
-						&td_offset, tx_offload);
+		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
 
-		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
-			uint64_t cd_type_cmd_tso_mss =
-				IAVF_TX_DESC_DTYPE_CONTEXT;
-			volatile struct iavf_tx_context_desc *ctx_txd =
+		if (nb_desc_ctx) {
+			volatile struct iavf_tx_context_desc *ctx_desc =
 				(volatile struct iavf_tx_context_desc *)
-							&txr[tx_id];
+					&txr[desc_idx];
 
 			/* clear QW0 or the previous writeback value
 			 * may impact next write
 			 */
-			*(volatile uint64_t *)ctx_txd = 0;
+			*(volatile uint64_t *)ctx_desc = 0;
 
-			txn = &sw_ring[txe->next_id];
+			txn = &txe_ring[txe->next_id];
 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
 			if (txe->mbuf) {
 				rte_pktmbuf_free_seg(txe->mbuf);
 				txe->mbuf = NULL;
 			}
 
-			/* TSO enabled */
-			if (ol_flags & PKT_TX_TCP_SEG)
-				cd_type_cmd_tso_mss |=
-					iavf_set_tso_ctx(tx_pkt, tx_offload);
+			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
-			if (ol_flags & PKT_TX_VLAN_PKT &&
-			   txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
-				cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
-					<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-				cd_l2tag2 = tx_pkt->vlan_tci;
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
 			}
 
-			ctx_txd->type_cmd_tso_mss =
-				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
-			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
 
-			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
-		}
 
-		m_seg = tx_pkt;
+		mb_seg = mb;
+
 		do {
-			txd = &txr[tx_id];
-			txn = &sw_ring[txe->next_id];
+			ddesc = (volatile struct iavf_tx_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
 			if (txe->mbuf)
 				rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = m_seg;
-
-			/* Setup TX Descriptor */
-			slen = m_seg->data_len;
-			buf_dma_addr = rte_mbuf_data_iova(m_seg);
-			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
-			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
-								  td_offset,
-								  slen,
-								  td_tag);
-
-			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
+
+			txe->mbuf = mb_seg;
+			iavf_fill_data_desc(ddesc, mb_seg,
+					ddesc_template, tlen, ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->next;
-		} while (m_seg);
+			mb_seg = mb_seg->next;
+		} while (mb_seg);
 
 		/* The last packet data descriptor needs End Of Packet (EOP) */
-		td_cmd |= IAVF_TX_DESC_CMD_EOP;
-		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
-		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
+
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
 
 		if (txq->nb_used >= txq->rs_thresh) {
 			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
 				   "%4u (port=%d queue=%d)",
-				   tx_last, txq->port_id, txq->queue_id);
+				   desc_idx_last, txq->port_id, txq->queue_id);
 
-			td_cmd |= IAVF_TX_DESC_CMD_RS;
+			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
 
 			/* Update txq RS bit counters */
 			txq->nb_used = 0;
 		}
 
-		txd->cmd_type_offset_bsz |=
-			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
-					 IAVF_TXD_QW1_CMD_SHIFT);
-		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
+				IAVF_TXD_DATA_QW1_CMD_SHIFT);
+
+		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
 	}
 
 end_of_tx:
 	rte_wmb();
 
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   txq->port_id, txq->queue_id, tx_id, nb_tx);
+		   txq->port_id, txq->queue_id, desc_idx, idx);
 
-	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
-	txq->tx_tail = tx_id;
+	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
+	txq->tx_tail = desc_idx;
 
-	return nb_tx;
+	return idx;
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index e210b913d6..7d9058e700 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -405,6 +405,112 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+
+#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
+#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
+#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
+#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
+	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
+	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT		(48)
+#define IAVF_TXD_DATA_QW1_L2TAG1_MASK		\
+	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
+	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
+	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT		(30)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK		\
+	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK		\
+	(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT		(50)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK		\
+	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT		(0)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_external_ip_type {
+	IAVF_TX_CTX_DESC_EIPT_NONE,
+	IAVF_TX_CTX_DESC_EIPT_IPV6,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM			(32)
+#define IAVF_TXD_CTX_QW0_L2TAG2_MASK			(0xFFFFUL)
+
+
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)
+
+/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
+#define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
+
+
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
@@ -555,9 +661,10 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	const volatile struct iavf_tx_desc *tx_desc = desc;
 	enum iavf_tx_desc_dtype_value type;
 
-	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
-		tx_desc->cmd_type_offset_bsz &
-		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+
+	type = (enum iavf_tx_desc_dtype_value)
+		rte_le_to_cpu_64(tx_desc->cmd_type_offset_bsz &
+			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
 	switch (type) {
 	case IAVF_TX_DESC_DTYPE_DATA:
 		name = "Tx_data_desc";
@@ -571,8 +678,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	}
 
 	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
-	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
-	       tx_desc->cmd_type_offset_bsz);
+		txq->queue_id, name, tx_id, tx_desc->buffer_addr,
+		tx_desc->cmd_type_offset_bsz);
 }
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index ee1e905525..288c5ca1f1 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -363,10 +363,12 @@ static inline void
 flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
 		     const uint32_t *type_table)
 {
-	const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+	const __m128i ptype_mask = _mm_set_epi16(
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
+
 	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
 	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
 	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v6 3/6] net/iavf: add support for asynchronous virt channel messages
  2021-10-08 10:19 ` [dpdk-dev] [PATCH v6 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-10-08 10:19   ` [dpdk-dev] [PATCH v6 1/6] common/iavf: " Radu Nicolau
  2021-10-08 10:20   ` [dpdk-dev] [PATCH v6 2/6] net/iavf: rework tx path Radu Nicolau
@ 2021-10-08 10:20   ` Radu Nicolau
  2021-10-08 10:20   ` [dpdk-dev] [PATCH v6 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-08 10:20 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for asynchronous virtual channel messages, specifically for
inline IPsec messages.
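
In outline: a synchronous command expects a single admin queue reply,
while an asynchronous command expects the immediate acknowledgement
plus one follow-up message, so the pending counter is armed with 2 and
the waiting thread is only released once it reaches zero. A minimal
sketch of that counting scheme follows (simplified from the patch
below, reusing the driver's pend_cmd_count and _notify_cmd; the helper
names are illustrative, not the actual driver functions):

	#include <rte_atomic.h>

	/* arm the counter when a command is issued */
	static void arm_pending_count(struct iavf_info *vf, int async)
	{
		/* async commands get the ack plus one more message */
		rte_atomic32_set(&vf->pend_cmd_count, async ? 2 : 1);
	}

	/* called from the admin queue handler for each matching reply */
	static void on_pending_reply(struct iavf_info *vf, int retval)
	{
		rte_atomic32_dec(&vf->pend_cmd_count);
		if (rte_atomic32_read(&vf->pend_cmd_count) == 0)
			_notify_cmd(vf, retval); /* wake up the waiter */
	}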

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h       |  16 ++++
 drivers/net/iavf/iavf_vchnl.c | 138 +++++++++++++++++++++-------------
 2 files changed, 101 insertions(+), 53 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 940d4f79ec..49d553a51c 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -193,6 +193,7 @@ struct iavf_info {
 	uint64_t supported_rxdid;
 	uint8_t *proto_xtr; /* proto xtr type for all queues */
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+	rte_atomic32_t pend_cmd_count;
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
@@ -345,9 +346,24 @@ _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
+	rte_atomic32_set(&vf->pend_cmd_count, 1);
+
 	return !ret;
 }
 
+/* Check there is pending cmd in execution. If none, set new command. */
+static inline int
+_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+	int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+	if (!ret)
+		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+	rte_atomic32_set(&vf->pend_cmd_count, 2);
+
+	return !ret;
+}
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 3275687927..4ec438412d 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -24,8 +24,8 @@
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
-#define MAX_TRY_TIMES 200
-#define ASQ_DELAY_MS  10
+#define MAX_TRY_TIMES 2000
+#define ASQ_DELAY_MS  1
 
 static uint32_t
 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
@@ -144,7 +144,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 }
 
 static int
-iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args,
+	int async)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
@@ -156,8 +157,14 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (vf->vf_reset)
 		return -EIO;
 
-	if (_atomic_set_cmd(vf, args->ops))
-		return -1;
+
+	if (async) {
+		if (_atomic_set_async_response_cmd(vf, args->ops))
+			return -1;
+	} else {
+		if (_atomic_set_cmd(vf, args->ops))
+			return -1;
+	}
 
 	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
@@ -253,9 +260,11 @@ static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
@@ -331,18 +340,40 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
 		case iavf_aqc_opc_send_msg_to_vf:
 			if (msg_opc == VIRTCHNL_OP_EVENT) {
 				iavf_handle_pf_event_msg(dev, info.msg_buf,
-							info.msg_len);
+						info.msg_len);
 			} else {
+				/* check for inline IPsec events */
+				struct inline_ipsec_msg *imsg =
+					(struct inline_ipsec_msg *)info.msg_buf;
+				struct rte_eth_event_ipsec_desc desc;
+				if (msg_opc == VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
+					&& imsg->ipsec_opcode ==
+						INLINE_IPSEC_OP_EVENT) {
+					struct virtchnl_ipsec_event *ev =
+							imsg->ipsec_data.event;
+					desc.subtype =
+						RTE_ETH_EVENT_IPSEC_UNKNOWN;
+					desc.metadata = ev->ipsec_event_data;
+					rte_eth_dev_callback_process(dev,
+							RTE_ETH_EVENT_IPSEC,
+							&desc);
+					return;
+				}
+
 				/* read message and it's expected one */
-				if (msg_opc == vf->pend_cmd)
-					_notify_cmd(vf, msg_ret);
-				else
-					PMD_DRV_LOG(ERR, "command mismatch,"
-						    "expect %u, get %u",
-						    vf->pend_cmd, msg_opc);
+				if (msg_opc == vf->pend_cmd) {
+					rte_atomic32_dec(&vf->pend_cmd_count);
+					if (rte_atomic32_read(
+						&vf->pend_cmd_count) == 0)
+						_notify_cmd(vf, msg_ret);
+				} else {
+					PMD_DRV_LOG(ERR,
+					"command mismatch, expect %u, get %u",
+						vf->pend_cmd, msg_opc);
+				}
 				PMD_DRV_LOG(DEBUG,
-					    "adminq response is received,"
-					    " opcode = %d", msg_opc);
+				"adminq response is received, opcode = %d",
+						msg_opc);
 			}
 			break;
 		default:
@@ -366,7 +397,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_ENABLE_VLAN_STRIPPING");
@@ -387,7 +418,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_DISABLE_VLAN_STRIPPING");
@@ -416,7 +447,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
 		return err;
@@ -469,12 +500,13 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
-		VIRTCHNL_VF_OFFLOAD_QOS;
+		VIRTCHNL_VF_OFFLOAD_QOS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -519,7 +551,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
@@ -563,7 +595,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_strip);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
@@ -603,7 +635,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_insert);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
@@ -646,7 +678,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(vlan_filter);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");
@@ -667,7 +699,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
@@ -698,7 +730,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES");
@@ -726,7 +758,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES");
@@ -759,7 +791,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
@@ -801,7 +833,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
@@ -845,7 +877,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
@@ -891,7 +923,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
@@ -923,7 +955,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
@@ -955,7 +987,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
@@ -1047,7 +1079,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
@@ -1088,7 +1120,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
 
@@ -1129,7 +1161,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
 
@@ -1189,7 +1221,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		args.in_args_size = len;
 		args.out_buffer = vf->aq_resp;
 		args.out_size = IAVF_AQ_BUF_SZ;
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		if (err)
 			PMD_DRV_LOG(ERR, "fail to execute command %s",
 				    add ? "OP_ADD_ETHER_ADDRESS" :
@@ -1216,7 +1248,7 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		*pstats = NULL;
@@ -1251,7 +1283,7 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1291,7 +1323,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
@@ -1318,7 +1350,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
@@ -1345,7 +1377,7 @@ iavf_fdir_add(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
 		return err;
@@ -1405,7 +1437,7 @@ iavf_fdir_del(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
 		return err;
@@ -1452,7 +1484,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
 		return err;
@@ -1493,7 +1525,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of %s",
@@ -1516,7 +1548,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
@@ -1542,7 +1574,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_SET_RSS_HENA");
@@ -1563,7 +1595,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1596,7 +1628,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_TC_MAP");
@@ -1641,7 +1673,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 		i * sizeof(struct virtchnl_ether_addr);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
@@ -1686,11 +1718,11 @@ iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
 		 * before iavf_read_msg_from_pf.
 		 */
 		rte_intr_disable(&pci_dev->intr_handle);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_intr_enable(&pci_dev->intr_handle);
 	} else {
 		rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
 				  iavf_dev_alarm_handler, dev);
 	}
@@ -1729,7 +1761,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
 		return err;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v6 4/6] net/iavf: add iAVF IPsec inline crypto support
  2021-10-08 10:19 ` [dpdk-dev] [PATCH v6 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (2 preceding siblings ...)
  2021-10-08 10:20   ` [dpdk-dev] [PATCH v6 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
@ 2021-10-08 10:20   ` Radu Nicolau
  2021-10-08 10:20   ` [dpdk-dev] [PATCH v6 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
  2021-10-08 10:20   ` [dpdk-dev] [PATCH v6 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-08 10:20 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Ray Kinsella
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as the offload of ESP over UDP,
in conjunction with TSO for UDP and TCP flows.
Implement support for rte_security packet metadata

Add definitions for the IPsec descriptors and extend the data and
context descriptors to support the offload.

Add support to the virtual channel mailbox for IPsec Crypto request
operations. IPsec Crypto requests receive an initial acknowledgement
of receipt from the physical function driver and then an asynchronous
response with the success/failure of the request, including any
response data.
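
For illustration, the shape of such an exchange as used for SA
creation later in this patch (condensed from
iavf_ipsec_crypto_security_association_add(); allocation checks and
cleanup omitted, and `adapter` is the VF private data used throughout
the driver):

	struct inline_ipsec_msg *req, *resp;
	size_t req_len = sizeof(*req) + sizeof(struct virtchnl_ipsec_sa_cfg);
	size_t resp_len = sizeof(*resp) +
			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
	int ret, sa_handle = -1;

	req = rte_malloc("ipsec-sa-req", req_len, 0);
	resp = rte_malloc("ipsec-sa-resp", resp_len, 0);

	req->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
	req->req_id = 0xDEAD;	/* echoed back in the response */
	/* ... fill the virtchnl_ipsec_sa_cfg that follows the header ... */

	/* returns once the asynchronous response has been received */
	ret = iavf_ipsec_crypto_request(adapter, (uint8_t *)req, req_len,
			(uint8_t *)resp, resp_len);
	if (!ret && resp->ipsec_opcode == req->ipsec_opcode &&
			resp->req_id == req->req_id)
		sa_handle = resp->ipsec_data.sa_cfg_resp->sa_handle;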

Add enhanced descriptor debugging

Refactor the scalar Tx burst function to support integration of the
offload.
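
From the application side the offload is driven through the generic
rte_security API; a rough sketch of creating an outbound AES-GCM
tunnel SA on a port bound to this PMD is shown below (illustrative
key, SPI, mbuf and mempool placeholders; error checks omitted; the
exact rte_security_session_create() prototype depends on the DPDK
release in use):

	uint8_t key[16] = { /* SA key */ };
	struct rte_security_ctx *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);

	struct rte_crypto_sym_xform aead = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key, .length = 16 },
			.iv = { .offset = 0, .length = 12 },
			.digest_length = 16,
		},
	};

	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1000,
			.salt = 0xdeadbeef,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
		},
		.crypto_xform = &aead,
	};

	struct rte_security_session *sess =
		rte_security_session_create(sec_ctx, &conf, sess_mp, priv_mp);

	/* per packet on transmit: attach the session, request the offload */
	rte_security_set_pkt_metadata(sec_ctx, sess, mbuf, NULL);
	mbuf->ol_flags |= PKT_TX_SEC_OFFLOAD;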

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h                       |   10 +
 drivers/net/iavf/iavf_ethdev.c                |   41 +-
 drivers/net/iavf/iavf_generic_flow.c          |   16 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1904 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  202 +-
 drivers/net/iavf/iavf_rxtx.h                  |   93 +-
 drivers/net/iavf/iavf_vchnl.c                 |   30 +
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 13 files changed, 2827 insertions(+), 21 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 49d553a51c..017b478510 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -221,6 +221,7 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+	struct iavf_parser_list ipsec_crypto_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
@@ -243,6 +244,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_IPV6_FLOW,
 	IAVF_PROTO_XTR_TCP,
 	IAVF_PROTO_XTR_IP_OFFSET,
+	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
 	IAVF_PROTO_XTR_MAX,
 };
 
@@ -254,11 +256,14 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 };
 
+struct iavf_security_ctx;
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
 	struct rte_eth_dev *eth_dev;
 	struct iavf_info vf;
+	struct iavf_security_ctx *security_ctx;
 
 	bool rx_bulk_alloc_allowed;
 	/* For vector PMD */
@@ -277,6 +282,8 @@ struct iavf_adapter {
 	(&((struct iavf_adapter *)adapter)->vf)
 #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct iavf_adapter *)adapter)->hw)
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+	(((struct iavf_adapter *)adapter)->security_ctx)
 
 /* IAVF_VSI_TO */
 #define IAVF_VSI_TO_HW(vsi) \
@@ -421,5 +428,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			uint16_t size);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 5a5a7f59e1..3c1cc1f4d5 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -30,6 +30,7 @@
 #include "iavf_rxtx.h"
 #include "iavf_generic_flow.h"
 #include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
@@ -71,6 +72,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
 	[IAVF_PROTO_XTR_IP_OFFSET] = {
 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
 		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+		.param = {
+		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+		.ol_flag =
+			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
 };
 
 static int iavf_dev_configure(struct rte_eth_dev *dev);
@@ -938,6 +944,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	/* free iAVF security device context all related resources */
+	iavf_security_ctx_destroy(adapter);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -947,7 +956,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
@@ -990,6 +1001,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
 
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+	}
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -1748,6 +1764,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
 		{ "tcp",       IAVF_PROTO_XTR_TCP       },
 		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
 	};
 	uint32_t i;
 
@@ -1756,8 +1773,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 			return xtr_type_map[i].type;
 	}
 
-	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
-		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+			"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
 
 	return -1;
 }
@@ -2404,6 +2421,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		goto flow_init_err;
 	}
 
+	/** Check if the IPsec Crypto offload is supported and create
+	 *  security_ctx if it is.
+	 */
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		/* Initialize security_ctx only for primary process*/
+		ret = iavf_security_ctx_create(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+			return ret;
+		}
+
+		ret = iavf_security_init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
+			return ret;
+		}
+	}
+
 	iavf_default_rss_disable(adapter);
 
 	return 0;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index b86d99e57d..34f83c8083 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1635,6 +1635,7 @@ iavf_flow_init(struct iavf_adapter *ad)
 	TAILQ_INIT(&vf->flow_list);
 	TAILQ_INIT(&vf->rss_parser_list);
 	TAILQ_INIT(&vf->dist_parser_list);
+	TAILQ_INIT(&vf->ipsec_crypto_parser_list);
 	rte_spinlock_init(&vf->flow_ops_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
@@ -1709,6 +1710,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
+		list = &vf->ipsec_crypto_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
@@ -2018,6 +2022,14 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 
 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
 				    actions, error);
+	if (*engine)
+		return 0;
+
+	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
+			pattern, actions, error);
+	if (*engine)
+		return 0;
+
 
 	if (!*engine) {
 		rte_flow_error_set(error, EINVAL,
@@ -2064,6 +2076,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	/* Special case for inline crypto egress flows */
+	if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY)
+		goto free_flow;
+
 	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
 			&engine, iavf_parse_engine_create, error);
 	if (ret < 0) {
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 4794d1fb80..a471c0331f 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -449,6 +449,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 /* engine types. */
 enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_NONE = 0,
+	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
 	IAVF_FLOW_ENGINE_MAX,
@@ -462,6 +463,7 @@ enum iavf_flow_engine_type {
  */
 enum iavf_flow_classification_stage {
 	IAVF_FLOW_STAGE_NONE = 0,
+	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
 	IAVF_FLOW_STAGE_RSS,
 	IAVF_FLOW_STAGE_DISTRIBUTOR,
 	IAVF_FLOW_STAGE_MAX,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
new file mode 100644
index 0000000000..9635b41679
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -0,0 +1,1904 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+#include <rte_security_driver.h>
+#include <rte_security.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_log.h"
+#include "iavf_generic_flow.h"
+
+#include "iavf_ipsec_crypto.h"
+#include "iavf_ipsec_crypto_capabilities.h"
+
+/**
+ * iAVF IPsec Crypto Security Context
+ */
+struct iavf_security_ctx {
+	struct iavf_adapter *adapter;
+	int pkt_md_offset;
+	struct rte_cryptodev_capabilities *crypto_capabilities;
+};
+
+/**
+ * iAVF IPsec Crypto Security Session Parameters
+ */
+struct iavf_security_session {
+	struct iavf_adapter *adapter;
+
+	enum rte_security_ipsec_sa_mode mode;
+	enum rte_security_ipsec_tunnel_type type;
+	enum rte_security_ipsec_sa_direction direction;
+
+	struct {
+		uint32_t spi; /* Security Parameter Index */
+		uint32_t hw_idx; /* SA Index in hardware table */
+	} sa;
+
+	struct {
+		uint8_t enabled :1;
+		union {
+			uint64_t value;
+			struct {
+				uint32_t hi;
+				uint32_t low;
+			};
+		};
+	} esn;
+
+	struct {
+		uint8_t enabled :1;
+	} udp_encap;
+
+	size_t iv_sz;
+	size_t icv_sz;
+	size_t block_sz;
+
+	struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
+};
+/**
+ *  IV Length field in IPsec Tx Desc uses the following encoding:
+ *
+ *  0B - 0
+ *  4B - 1
+ *  8B - 2
+ *  16B - 3
+ *
+ * but we also need the IV Length for TSO to correctly calculate the total
+ * header length, so it is also placed in the upper 6 bits for easier retrieval.
+ */
+static inline uint8_t
+calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
+{
+	uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
+
+	switch (iv_sz) {
+	case 4:
+		iv_length = IAVF_IPSEC_IV_LEN_DW;
+		break;
+	case 8:
+		iv_length = IAVF_IPSEC_IV_LEN_DDW;
+		break;
+	case 16:
+		iv_length = IAVF_IPSEC_IV_LEN_QDW;
+		break;
+	}
+
+	return (iv_sz << 2) | iv_length;
+}
+
+
+static unsigned int
+iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
+{
+	return sizeof(struct iavf_security_session);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_capability(struct iavf_security_ctx *iavf_sctx,
+	uint32_t algo, uint32_t type)
+{
+	const struct rte_cryptodev_capabilities *capability;
+	int i = 0;
+
+	capability = &iavf_sctx->crypto_capabilities[i];
+
+	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+			capability->sym.xform_type == type &&
+			capability->sym.cipher.algo == algo)
+			return &capability->sym;
+		/** try next capability */
+		capability = &iavf_crypto_capabilities[i++];
+	}
+
+	return NULL;
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_auth_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
+}
+static const struct rte_cryptodev_symmetric_capability *
+get_aead_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
+}
+
+static uint16_t
+get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->auth.block_size;
+}
+
+static uint8_t
+calc_context_desc_cipherblock_sz(size_t len)
+{
+	switch (len) {
+	case 8:
+		return 0x2;
+	case 16:
+		return 0x3;
+	default:
+		return 0x0;
+	}
+}
+
+static int
+valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
+{
+	if (len < min || len > max)
+		return false;
+
+	if (increment == 0)
+		return true;
+
+	if ((len - min) % increment)
+		return false;
+
+	/* make sure it fits in the key array */
+	if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
+		return false;
+
+	return true;
+}
+
+static int
+valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_auth_xform *auth)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, auth->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(auth->key.length,
+		capability->auth.key_size.min,
+		capability->auth.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_cipher_xform *cipher)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, cipher->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(cipher->key.length,
+		capability->cipher.key_size.min,
+		capability->cipher.key_size.max,
+		capability->cipher.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_aead_xform *aead)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, aead->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(aead->key.length,
+		capability->aead.key_size.min,
+		capability->aead.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
+	struct rte_security_session_conf *conf)
+{
+	/** validate security action/protocol selection */
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
+		PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate IPsec protocol selection */
+	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate selected options */
+	if (conf->ipsec.options.copy_dscp ||
+		conf->ipsec.options.copy_flabel ||
+		conf->ipsec.options.copy_df ||
+		conf->ipsec.options.dec_ttl ||
+		conf->ipsec.options.ecn ||
+		conf->ipsec.options.stats) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+		return -EINVAL;
+	}
+
+	/**
+	 * Validate crypto xforms parameters.
+	 *
+	 * AEAD transforms can be used for either inbound/outbound IPsec SAs,
+	 * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
+	 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
+	 */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_auth_xform(iavf_sctx,
+				&conf->crypto_xform->next->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->next->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_aead_xform *aead, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AEAD;
+
+	switch (aead->algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		cfg->algo_type = VIRTCHNL_AES_CCM; break;
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		cfg->algo_type = VIRTCHNL_AES_GCM; break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
+		break;
+	}
+
+	cfg->key_len = aead->key.length;
+	cfg->iv_len = aead->iv.length;
+	cfg->digest_len = aead->digest_length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, aead->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_cipher_xform *cipher, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_CIPHER;
+
+	switch (cipher->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cfg->algo_type = VIRTCHNL_AES_CBC; break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cfg->algo_type = VIRTCHNL_3DES_CBC; break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cfg->algo_type = VIRTCHNL_AES_CTR;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid cipher parameters");
+		break;
+	}
+
+	cfg->key_len = cipher->key.length;
+	cfg->iv_len = cipher->iv.length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
+}
+
+
+static void
+sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_auth_xform *auth, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AUTH;
+
+	switch (auth->algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+		cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		cfg->algo_type = VIRTCHNL_AES_CMAC; break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		cfg->algo_type = VIRTCHNL_AES_GMAC;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid auth parameters");
+		break;
+	}
+
+	cfg->key_len = auth->key.length;
+	cfg->iv_len = auth->iv.length;
+	cfg->digest_len = auth->digest_length;
+
+	memcpy(cfg->key_data, auth->key.data, cfg->key_len);
+}
+
+/**
+ * Send SA add virtual channel request to Inline IPsec driver.
+ *
+ * Inline IPsec driver expects SPI and destination IP address to be in host
+ * order, but DPDK APIs are network order, therefore we need to do a htonl
+ * conversion of these parameters.
+ */
+static uint32_t
+iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
+	struct rte_security_session_conf *conf)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	struct virtchnl_ipsec_sa_cfg *sa_cfg;
+	size_t request_len, response_len;
+
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg);
+
+	request = rte_malloc("iavf-sad-add-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
+	response = rte_malloc("iavf-sad-add-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set SA configuration params */
+	sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
+
+	sa_cfg->spi = conf->ipsec.spi;
+	sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
+	sa_cfg->virtchnl_direction =
+		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+			VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
+
+	if (conf->ipsec.options.esn) {
+		sa_cfg->esn_enabled = 1;
+		sa_cfg->esn_hi = conf->ipsec.esn.hi;
+		sa_cfg->esn_low = conf->ipsec.esn.low;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sa_cfg->udp_encap_enabled = 1;
+
+	/* Set outer IP params */
+	if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
+
+		*((uint32_t *)sa_cfg->dst_addr)	=
+			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
+	} else {
+		uint32_t *v6_dst_addr =
+			conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
+
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
+
+		((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
+		((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
+		((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
+		((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
+	}
+
+	/* set crypto params */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->aead, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->cipher, conf->ipsec.salt);
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->auth, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->auth, conf->ipsec.salt);
+		if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
+			sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->cipher, conf->ipsec.salt);
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sa_cfg_resp->sa_handle;
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static void
+set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
+	struct iavf_security_session *sess)
+{
+	template->sa_idx = sess->sa.hw_idx;
+
+	if (sess->udp_encap.enabled)
+		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
+
+	if (sess->esn.enabled)
+		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
+
+	template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
+	template->ctx_desc_ipsec_params =
+			calc_context_desc_cipherblock_sz(sess->block_sz) |
+			((uint8_t)(sess->icv_sz >> 2) << 3);
+}
+
+static void
+set_session_parameter(struct iavf_security_ctx *iavf_sctx,
+	struct iavf_security_session *sess,
+	struct rte_security_session_conf *conf, uint32_t sa_idx)
+{
+	sess->adapter = iavf_sctx->adapter;
+
+	sess->mode = conf->ipsec.mode;
+	sess->direction = conf->ipsec.direction;
+
+	if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		sess->type = conf->ipsec.tunnel.type;
+
+	sess->sa.spi = conf->ipsec.spi;
+	sess->sa.hw_idx = sa_idx;
+
+	if (conf->ipsec.options.esn) {
+		sess->esn.enabled = 1;
+		sess->esn.value = conf->ipsec.esn.value;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sess->udp_encap.enabled = 1;
+
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sess->block_sz = get_aead_blocksize(iavf_sctx,
+			conf->crypto_xform->aead.algo);
+		sess->iv_sz = conf->crypto_xform->aead.iv.length;
+		sess->icv_sz = conf->crypto_xform->aead.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sess->block_sz = get_cipher_blocksize(iavf_sctx,
+			conf->crypto_xform->cipher.algo);
+		sess->iv_sz = conf->crypto_xform->cipher.iv.length;
+		sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			sess->block_sz = get_auth_blocksize(iavf_sctx,
+				RTE_CRYPTO_SYM_XFORM_AUTH);
+			sess->iv_sz = conf->crypto_xform->auth.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		} else {
+			sess->block_sz = get_cipher_blocksize(iavf_sctx,
+				conf->crypto_xform->next->cipher.algo);
+			sess->iv_sz =
+				conf->crypto_xform->next->cipher.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		}
+	}
+
+	set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
+}
+
+/**
+ * Create IPsec Security Association for inline IPsec Crypto offload.
+ *
+ * 1. validate session configuration parameters
+ * 2. allocate session memory from mempool
+ * 3. add SA to hardware database
+ * 4. set session parameters
+ * 5. create packet metadata template for datapath
+ */
+static int
+iavf_ipsec_crypto_session_create(void *device,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *session,
+				 struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_session = NULL;
+	int sa_idx;
+	int ret = 0;
+
+	/* validate that all SA parameters are valid for device */
+	ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
+	if (ret)
+		return ret;
+
+	/* allocate session context */
+	if (rte_mempool_get(mempool, (void **)&iavf_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
+		return -ENOMEM;
+	}
+
+	/* add SA to hardware database */
+	sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
+	if (sa_idx < 0) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add SA (spi: %d, mode: %s, direction: %s)",
+			conf->ipsec.spi,
+			conf->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
+				"transport" : "tunnel",
+			conf->ipsec.direction ==
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+				"inbound" : "outbound");
+
+		rte_mempool_put(mempool, iavf_session);
+		return -EFAULT;
+	}
+
+	/* save data plane required session parameters */
+	set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
+
+	/* save to security session private data */
+	set_sec_session_private_data(session, iavf_session);
+
+	return 0;
+}
+
+/**
+ * Check if valid ipsec crypto action.
+ * SPI must be non-zero and SPI in session must match SPI value
+ * passed into function.
+ *
+ * returns: 0 if invalid session or SPI value equal zero
+ * returns: 1 if valid
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_session *sess = session->sess_private_data;
+
+	/* verify session is valid and belongs to this adapter */
+	if (unlikely(sess == NULL || sess->adapter != adapter))
+		return false;
+
+	/* SPI value must be non-zero */
+	if (spi == 0)
+		return false;
+	/* Session SPI must match flow SPI */
+	else if (sess->sa.spi == spi) {
+		return true;
+		/**
+		 * TODO: We should add a way of tracking valid hw SA indices to
+		 * make validation less brittle
+		 */
+	}
+
+	return true;
+}
+
+
+/**
+ * Send virtual channel security policy add request to IES driver.
+ *
+ * The IES driver expects the SPI and destination IP address to be in host
+ * byte order, but the DPDK APIs use network byte order, therefore a htonl
+ * conversion of these parameters is needed.
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg);
+	request = rte_malloc("iavf-inbound-security-policy-add-request",
+				request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* ESP SPI */
+	request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
+
+	/* Destination IP  */
+	if (is_v4) {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
+		request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
+	} else {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+		request->ipsec_data.sp_cfg->dip[0] =
+				htonl(((uint32_t *)v6_dst_addr)[0]);
+		request->ipsec_data.sp_cfg->dip[1] =
+				htonl(((uint32_t *)v6_dst_addr)[1]);
+		request->ipsec_data.sp_cfg->dip[2] =
+				htonl(((uint32_t *)v6_dst_addr)[2]);
+		request->ipsec_data.sp_cfg->dip[3] =
+				htonl(((uint32_t *)v6_dst_addr)[3]);
+	}
+
+	request->ipsec_data.sp_cfg->drop = drop;
+
+	/** Traffic Class/Congestion Domain currently not supported */
+	request->ipsec_data.sp_cfg->set_tc = 0;
+	request->ipsec_data.sp_cfg->cgd = 0;
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
+	response = rte_malloc("iavf-inbound-security-policy-add-response",
+				response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* send virtual channel request to add inbound security policy */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sp_cfg_resp->rule_id;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_update);
+	request = rte_malloc("iavf-sa-update-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sa-update-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set request params */
+	request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
+	request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
+
+	/* send virtual channel request to update the SA */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_resp->resp;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_update(void *device,
+		struct rte_security_session *session,
+		struct rte_security_session_conf *conf)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int rc = 0;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify session is valid and belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* update esn hi 32-bits */
+	if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
+		/**
+		 * Update ESN in hardware for inbound SA. Store in
+		 * iavf_security_session for outbound SA for use
+		 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
+		 */
+		if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+			rc = iavf_ipsec_crypto_sa_update_esn(adapter,
+					iavf_sess);
+		else
+			iavf_sess->esn.hi = conf->ipsec.esn.hi;
+	}
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
+		struct rte_security_session *session __rte_unused,
+		struct rte_security_stats *stats __rte_unused)
+{
+	return -EOPNOTSUPP;
+}
+
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_destroy);
+	request = rte_malloc("iavf-sp-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sp-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set security policy params */
+	request->ipsec_data.sp_destroy->table_id = is_v4 ?
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+	request->ipsec_data.sp_destroy->rule_id = flow_id;
+
+	/* send virtual channel request to delete the security policy */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		return response->ipsec_data.ipsec_status->status;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_destroy);
+
+	request = rte_malloc("iavf-sa-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+
+	response = rte_malloc("iavf-sa-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/**
+	 * SA delete supports deletion of 1-8 specified SAs, or, if the flag
+	 * field is zero, all SAs associated with the VF will be deleted.
+	 */
+	if (sess) {
+		request->ipsec_data.sa_destroy->flag = 0x1;
+		request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
+	} else {
+		request->ipsec_data.sa_destroy->flag = 0x0;
+	}
+
+	/* send virtual channel request to delete the SA */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+
+	/**
+	 * The delete status will match the sa_destroy request flag bitmask if
+	 * the delete was successful
+	 */
+	if (request->ipsec_data.sa_destroy->flag !=
+			response->ipsec_data.ipsec_status->status)
+		rc = -EFAULT;
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+
+static int
+iavf_ipsec_crypto_session_destroy(void *device,
+		struct rte_security_session *session)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int ret;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify session is valid and belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
+	rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
+	return ret;
+}
+
+/**
+ * Get the ESP trailer from the packet and calculate the total ESP trailer
+ * length, which includes the padding, the ESP trailer footer and the ICV
+ */
+static inline struct rte_esp_tail *
+iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t *esp_trailer_length)
+{
+	struct rte_esp_tail *esp_trailer;
+
+	uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
+	uint16_t offset = 0;
+
+	/**
+	 * The ICV will not be present in TSO packets as this is appended by
+	 * hardware during segment generation
+	 */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		length -= s->icv_sz;
+
+	*esp_trailer_length = length;
+
+	/**
+	 * Calculate offset in packet to ESP trailer header, this should be
+	 * total packet length less the size of the ESP trailer plus the ICV
+	 * length if it is present
+	 */
+	offset = rte_pktmbuf_pkt_len(m) - length;
+
+	if (m->nb_segs > 1) {
+		/* find the segment in which the esp trailer is located */
+		while (m->data_len < offset) {
+			offset -= m->data_len;
+			m = m->next;
+		}
+	}
+
+	esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
+
+	*esp_trailer_length += esp_trailer->pad_len;
+
+	return esp_trailer;
+}
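+
+/*
+ * Worked example (illustrative only): for a single-segment, non-TSO
+ * packet with a 16 byte ICV and 2 bytes of ESP padding, the search
+ * length is sizeof(struct rte_esp_tail) + 16 = 18, the trailer header is
+ * read at pkt_len - 18, and after adding pad_len the reported trailer
+ * length is 20 bytes.
+ */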
+
+
+static inline uint16_t
+iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t esp_tlen)
+{
+	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
+	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
+	uint16_t ol4_len = 0;		/* UDP NATT */
+	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
+	uint16_t l4_len = 0;		/* TCP/UDP/STCP hdrs */
+	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
+
+	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ol3_len = m->outer_l3_len;
+		/**<
+		 * application provided l3len assumed to include length of
+		 * ipv4/6 hdr + ext hdrs
+		 */
+
+	if (s->udp_encap.enabled)
+		ol4_len = sizeof(struct rte_udp_hdr);
+
+	l3_len = m->l3_len;
+	l4_len = m->l4_len;
+
+	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
+			esp_hlen + l3_len + l4_len + esp_tlen);
+}
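+
+/*
+ * Worked example (illustrative only): for a tunnel mode, NAT-T ESP packet
+ * with a 14 byte outer MAC header, 20 byte outer IP header, 8 byte NAT-T
+ * UDP header, 8 byte ESP header plus 8 byte IV, 20 byte inner IP header,
+ * 20 byte TCP header and a 20 byte ESP trailer, the L4 payload length is
+ * pkt_len minus 118 bytes of headers and trailer.
+ */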
+
+
+static int
+iavf_ipsec_crypto_pkt_metadata_set(void *device,
+			 struct rte_security_session *session,
+			 struct rte_mbuf *m, void *params)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_sess = session->sess_private_data;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
+	struct rte_esp_tail *esp_tail;
+	uint64_t *sqn = params;
+	uint16_t esp_trailer_length;
+
+	/* Check that we have a valid session associated with this device */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* Get dynamic metadata location from mbuf */
+	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
+		struct iavf_ipsec_crypto_pkt_metadata *);
+
+	/* Set immutable metadata values from the session template */
+	memcpy(md, &iavf_sess->pkt_metadata_template,
+		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
+
+	esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
+			&esp_trailer_length);
+
+	/* Set per packet mutable metadata values */
+	md->esp_trailer_len = esp_trailer_length;
+	md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
+				iavf_sess, esp_trailer_length);
+	md->next_proto = esp_tail->next_proto;
+
+	/* If Extended SN in use set the upper 32-bits in metadata */
+	if (iavf_sess->esn.enabled && sqn != NULL)
+		md->esn = (uint32_t)(*sqn >> 32);
+
+	return 0;
+}
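+
+/*
+ * Usage sketch (illustrative only, not part of this patch): with the
+ * standard rte_security flow an application would typically call
+ *
+ *   rte_security_set_pkt_metadata(sec_ctx, session, mbuf, &sqn);
+ *
+ * before rte_eth_tx_burst(), where sqn is the 64-bit ESP sequence number;
+ * only its upper 32 bits are consumed here when ESN is enabled.
+ */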
+
+static int
+iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
+		struct virtchnl_ipsec_cap *capability)
+{
+	/* Perform pf-vf comms */
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg);
+
+	request = rte_malloc("iavf-device-capability-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_cap);
+	response = rte_malloc("iavf-device-capability-response",
+			response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* send virtual channel request to get device capabilities */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id) {
+		rc = -EFAULT;
+		goto update_cleanup;
+	}
+	memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+
+static const enum rte_crypto_auth_algorithm auth_maptbl[] = {
+	/* Hash Algorithm */
+	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
+	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
+	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
+	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
+	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
+	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
+	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
+	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
+	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
+	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+};
+
+static void
+update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
+		struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	capability->auth.algo = auth_maptbl[acap->algo_type];
+	capability->auth.block_size = acap->block_size;
+
+	capability->auth.key_size.min = acap->min_key_size;
+	capability->auth.key_size.max = acap->max_key_size;
+	capability->auth.key_size.increment = acap->inc_key_size;
+
+	capability->auth.digest_size.min = acap->min_digest_size;
+	capability->auth.digest_size.max = acap->max_digest_size;
+	capability->auth.digest_size.increment = acap->inc_digest_size;
+}
+
+static const enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
+	/* Cipher Algorithm */
+	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
+	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
+	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
+	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
+};
+
+
+static void
+update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+	capability->cipher.algo = cipher_maptbl[acap->algo_type];
+
+	capability->cipher.block_size = acap->block_size;
+
+	capability->cipher.key_size.min = acap->min_key_size;
+	capability->cipher.key_size.max = acap->max_key_size;
+	capability->cipher.key_size.increment = acap->inc_key_size;
+
+	capability->cipher.iv_size.min = acap->min_iv_size;
+	capability->cipher.iv_size.max = acap->max_iv_size;
+	capability->cipher.iv_size.increment = acap->inc_iv_size;
+}
+
+static const enum rte_crypto_aead_algorithm aead_maptbl[] = {
+	/* AEAD Algorithm */
+	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
+	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
+	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+};
+
+static void
+update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	capability->aead.algo = aead_maptbl[acap->algo_type];
+
+	capability->aead.block_size = acap->block_size;
+
+	capability->aead.key_size.min = acap->min_key_size;
+	capability->aead.key_size.max = acap->max_key_size;
+	capability->aead.key_size.increment = acap->inc_key_size;
+
+	capability->aead.aad_size.min = acap->min_aad_size;
+	capability->aead.aad_size.max = acap->max_aad_size;
+	capability->aead.aad_size.increment = acap->inc_aad_size;
+
+	capability->aead.iv_size.min = acap->min_iv_size;
+	capability->aead.iv_size.max = acap->max_iv_size;
+	capability->aead.iv_size.increment = acap->inc_iv_size;
+
+	capability->aead.digest_size.min = acap->min_digest_size;
+	capability->aead.digest_size.max = acap->max_digest_size;
+	capability->aead.digest_size.increment = acap->inc_digest_size;
+}
+
+
+/**
+ * Dynamically set crypto capabilities based on virtchannel IPsec
+ * capabilities structure.
+ */
+int
+iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
+{
+	struct rte_cryptodev_capabilities *capabilities;
+	int i, j, number_of_capabilities = 0, ci = 0;
+
+	/* Count the total number of crypto algorithms supported */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
+		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
+
+	/**
+	 * Allocate cryptodev capabilities structure for
+	 * *number_of_capabilities* items plus one item to null terminate the
+	 * array
+	 */
+	capabilities = rte_zmalloc("crypto_cap",
+		sizeof(struct rte_cryptodev_capabilities) *
+		(number_of_capabilities + 1), 0);
+	if (capabilities == NULL)
+		return -ENOMEM;
+
+	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
+
+	/**
+	 * Iterate over each virtchl crypto capability by crypto type and
+	 * algorithm.
+	 */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
+		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
+			switch (vch_cap->cap[i].crypto_type) {
+			case VIRTCHNL_AUTH:
+				update_auth_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_CIPHER:
+				update_cipher_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_AEAD:
+				update_aead_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			default:
+				capabilities[ci].op =
+						RTE_CRYPTO_OP_TYPE_UNDEFINED;
+				break;
+			}
+		}
+	}
+
+	iavf_sctx->crypto_capabilities = capabilities;
+	return 0;
+}
+
+/**
+ * Get security capabilities for device
+ */
+static const struct rte_security_capability *
+iavf_ipsec_crypto_capabilities_get(void *device)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	unsigned int i;
+
+	static struct rte_security_capability iavf_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 }
+			},
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	/**
+	 * Update the security capabilities struct with the runtime discovered
+	 * crypto capabilities, except for the last element of the array which
+	 * is the null termination
+	 */
+	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
+			sizeof(iavf_security_capabilities[0])) - 1); i++) {
+		iavf_security_capabilities[i].crypto_capabilities =
+			iavf_sctx->crypto_capabilities;
+	}
+
+	return iavf_security_capabilities;
+}
+
+static struct rte_security_ops iavf_ipsec_crypto_ops = {
+	.session_get_size		= iavf_ipsec_crypto_session_size_get,
+	.session_create			= iavf_ipsec_crypto_session_create,
+	.session_update			= iavf_ipsec_crypto_session_update,
+	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
+	.session_destroy		= iavf_ipsec_crypto_session_destroy,
+	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
+	.get_userdata			= NULL,
+	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
+};
+
+int
+iavf_security_ctx_create(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx;
+
+	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
+	if (sctx == NULL)
+		return -ENOMEM;
+
+	sctx->device = adapter->eth_dev;
+	sctx->ops = &iavf_ipsec_crypto_ops;
+	sctx->sess_cnt = 0;
+
+	adapter->eth_dev->security_ctx = sctx;
+
+	if (adapter->security_ctx == NULL) {
+		adapter->security_ctx = rte_malloc("iavf_security_ctx",
+				sizeof(struct iavf_security_ctx), 0);
+		if (adapter->security_ctx == NULL) {
+			rte_free(sctx);
+			adapter->eth_dev->security_ctx = NULL;
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+int
+iavf_security_init(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct rte_mbuf_dynfield pkt_md_dynfield = {
+		.name = "iavf_ipsec_crypto_pkt_metadata",
+		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
+		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
+	};
+	struct virtchnl_ipsec_cap capabilities;
+	int rc;
+
+	iavf_sctx->adapter = adapter;
+
+	iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
+	if (iavf_sctx->pkt_md_offset < 0)
+		return iavf_sctx->pkt_md_offset;
+
+	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
+	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
+	if (rc)
+		return rc;
+
+	return	iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
+			&capabilities);
+}
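+
+/*
+ * Illustrative note: iavf_security_ctx_create() and iavf_security_init()
+ * are expected to run during device initialisation once the
+ * VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO capability has been negotiated;
+ * the registered mbuf dynamic field offset is then propagated to each Tx
+ * queue at queue setup time via iavf_security_get_pkt_md_offset().
+ */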
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	return iavf_sctx->pkt_md_offset;
+}
+
+int
+iavf_security_ctx_destroy(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx  = adapter->eth_dev->security_ctx;
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	if (iavf_sctx == NULL)
+		return -ENODEV;
+
+	/* TODO: Add resources cleanup */
+
+	/* free and reset security data structures */
+	rte_free(iavf_sctx);
+	rte_free(sctx);
+
+	adapter->security_ctx = NULL;
+	adapter->eth_dev->security_ctx = NULL;
+
+	return 0;
+}
+
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
+{
+	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
+
+	/** Capability check for IPsec Crypto */
+	if (resources && (resources->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
+		return true;
+
+	return false;
+}
+
+
+#define IAVF_IPSEC_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
+enum iavf_ipsec_flow_pt_type {
+	IAVF_PATTERN_ESP = 1,
+	IAVF_PATTERN_AH,
+	IAVF_PATTERN_UDP_ESP,
+};
+enum iavf_ipsec_flow_pt_ip_ver {
+	IAVF_PATTERN_IPV4 = 1,
+	IAVF_PATTERN_IPV6,
+};
+
+#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
+#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
+#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
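+
+/*
+ * Example (illustrative only): IAVF_PATTERN(IAVF_PATTERN_UDP_ESP,
+ * IAVF_PATTERN_IPV4) encodes to 0x13; IAVF_PATTERN_TYPE() recovers
+ * IAVF_PATTERN_UDP_ESP from the low nibble and IAVF_PATTERN_IP_V()
+ * recovers IAVF_PATTERN_IPV4 from the high nibble.
+ */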
+
+static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
+	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
+};
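+
+/*
+ * Illustrative only: the eth/ipv4/esp entry above is selected by a flow
+ * rule of the form
+ *
+ *   pattern: ETH / IPV4 / ESP spi is <spi> / END
+ *   actions: SECURITY <session> / END
+ *
+ * where the SECURITY action references an inline crypto session created
+ * on this port.
+ */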
+
+struct iavf_ipsec_flow_item {
+	uint64_t id;
+	uint8_t is_ipv4;
+	uint32_t spi;
+	struct rte_ether_hdr eth_hdr;
+	union {
+		struct rte_ipv4_hdr ipv4_hdr;
+		struct rte_ipv6_hdr ipv6_hdr;
+	};
+	struct rte_udp_hdr udp_hdr;
+};
+
+static void
+parse_eth_item(const struct rte_flow_item_eth *item,
+		struct rte_ether_hdr *eth)
+{
+	memcpy(eth->s_addr.addr_bytes,
+			item->src.addr_bytes, sizeof(eth->s_addr));
+	memcpy(eth->d_addr.addr_bytes,
+			item->dst.addr_bytes, sizeof(eth->d_addr));
+}
+
+static void
+parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
+		struct rte_ipv4_hdr *ipv4)
+{
+	ipv4->src_addr = item->hdr.src_addr;
+	ipv4->dst_addr = item->hdr.dst_addr;
+}
+
+static void
+parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
+		struct rte_ipv6_hdr *ipv6)
+{
+	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
+	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+}
+
+static void
+parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
+{
+	udp->dst_port = item->hdr.dst_port;
+	udp->src_port = item->hdr.src_port;
+}
+
+static int
+has_security_action(const struct rte_flow_action actions[],
+	const void **session)
+{
+	/* only {SECURITY; END} supported */
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
+		*session = actions[0].conf;
+		return true;
+	}
+	return false;
+}
+
+
+static struct iavf_ipsec_flow_item *
+iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t type)
+{
+	const void *session;
+	struct iavf_ipsec_flow_item
+		*ipsec_flow = rte_malloc("security-flow-rule",
+		sizeof(struct iavf_ipsec_flow_item), 0);
+	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
+	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
+
+	if (ipsec_flow == NULL)
+		return NULL;
+
+	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
+
+	if (pattern[0].spec)
+		parse_eth_item((const struct rte_flow_item_eth *)
+				pattern[0].spec, &ipsec_flow->eth_hdr);
+
+	switch (p_type) {
+	case IAVF_PATTERN_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[2].spec)->hdr.spi;
+		break;
+	case IAVF_PATTERN_AH:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_ah *)
+					pattern[2].spec)->spi;
+		break;
+	case IAVF_PATTERN_UDP_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		parse_udp_item((const struct rte_flow_item_udp *)
+				pattern[2].spec,
+			&ipsec_flow->udp_hdr);
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[3].spec)->hdr.spi;
+		break;
+	default:
+		goto flow_cleanup;
+	}
+
+
+	if (!has_security_action(actions, &session))
+		goto flow_cleanup;
+
+	if (!iavf_ipsec_crypto_action_valid(ethdev, session,
+			ipsec_flow->spi))
+		goto flow_cleanup;
+
+	return ipsec_flow;
+
+flow_cleanup:
+	rte_free(ipsec_flow);
+	return NULL;
+}
+
+
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser;
+
+static int
+iavf_ipsec_flow_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
+		parser = &iavf_ipsec_flow_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
+}
+
+static int
+iavf_ipsec_flow_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = meta;
+
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	if (ipsec_flow->is_ipv4) {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			1,
+			ipsec_flow->ipv4_hdr.dst_addr,
+			NULL,
+			0);
+	} else {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			0,
+			0,
+			ipsec_flow->ipv6_hdr.dst_addr,
+			0);
+	}
+
+	if (ipsec_flow->id < 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Failed to add SA.");
+		return -rte_errno;
+	}
+
+	flow->rule = ipsec_flow;
+
+	return 0;
+}
+
+static int
+iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
+
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	iavf_ipsec_crypto_security_policy_delete(ad,
+			ipsec_flow->is_ipv4, ipsec_flow->id);
+	rte_free(ipsec_flow);
+	return 0;
+}
+
+static struct iavf_flow_engine iavf_ipsec_flow_engine = {
+	.init = iavf_ipsec_flow_init,
+	.uninit = iavf_ipsec_flow_uninit,
+	.create = iavf_ipsec_flow_create,
+	.destroy = iavf_ipsec_flow_destroy,
+	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
+};
+
+static int
+iavf_ipsec_flow_parse(struct iavf_adapter *ad,
+		       struct iavf_pattern_match_item *array,
+		       uint32_t array_len,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta,
+		       struct rte_flow_error *error)
+{
+	struct iavf_pattern_match_item *item = NULL;
+	int ret = -1;
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (item && item->meta) {
+		uint32_t type = (uint64_t)(item->meta);
+		struct iavf_ipsec_flow_item *fi =
+				iavf_ipsec_flow_item_parse(ad->eth_dev,
+						pattern, actions, type);
+		if (fi && meta) {
+			*meta = fi;
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser = {
+	.engine = &iavf_ipsec_flow_engine,
+	.array = iavf_ipsec_flow_pattern,
+	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
+	.parse_pattern_action = iavf_ipsec_flow_parse,
+	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
+};
+
+RTE_INIT(iavf_ipsec_flow_engine_register)
+{
+	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
+}
+
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
new file mode 100644
index 0000000000..4e4c8798ec
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_H_
+#define _IAVF_IPSEC_CRYPTO_H_
+
+#include <rte_security.h>
+
+#include "iavf.h"
+
+
+
+struct iavf_tx_ipsec_desc {
+	union {
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le16 l4payload_length;
+			__le32 esn;
+			__le16 trailer_length;
+			u8 type:4;
+			u8 rsv:1;
+			u8 udp:1;
+			u8 ivlen:2;
+			u8 next_header;
+			__le16 ipv6_ext_hdr_length;
+			__le32 said;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
+			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
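+
+/*
+ * Illustrative summary of the Tx IPsec descriptor layout implied by the
+ * shifts and masks above (not an authoritative register description):
+ *   QW0: [13:0] L4 payload len, [47:16] ESN, [53:48] trailer len
+ *   QW1: [3:0] dtype, [5] UDP (NAT-T), [7:6] IV len, [15:8] next header,
+ *        [23:16] IPv6 ext hdr len, [51:32] SA index
+ */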
+
+/* Initialization Vector Length type */
+enum iavf_ipsec_iv_len {
+	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
+	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
+	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
+	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
+};
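+
+/*
+ * Example mapping (illustrative only): the 8 byte AES-GCM IV advertised
+ * in iavf_crypto_capabilities is encoded as IAVF_IPSEC_IV_LEN_DDW and the
+ * 16 byte AES-CBC IV as IAVF_IPSEC_IV_LEN_QDW; the conversion from the
+ * session IV size is done by calc_ipsec_desc_iv_len_field() when the
+ * packet metadata template is built.
+ */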
+
+
+/* IPsec Crypto Packet Metadata offload flags */
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
+
+/**
+ * Packet metadata data structure used to hold parameters required by the iAVF
+ * transmit data path. Parameters are set per packet by calling the
+ * rte_security_set_pkt_metadata() API.
+ */
+struct iavf_ipsec_crypto_pkt_metadata {
+	uint32_t sa_idx;                /* SA hardware index (20b/4B) */
+
+	uint8_t ol_flags;		/* flags (1B) */
+	uint8_t len_iv;			/* IV length (2b/1B) */
+	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
+	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
+
+	uint16_t l4_payload_len;	/* L4 payload length */
+	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extension headers len (5b/1B) */
+	uint8_t next_proto;		/* Next Protocol (8b/1B) */
+
+	uint32_t esn;		        /* Extended Sequence Number (32b/4B) */
+} __rte_packed;
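+
+/*
+ * Note (illustrative): the structure above packs to 16 bytes and is
+ * written into the mbuf dynamic field registered by iavf_security_init();
+ * the Tx path reads it back through the offset returned by
+ * iavf_security_get_pkt_md_offset().
+ */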
+
+/**
+ * Check if inline IPsec Crypto offload is supported
+ */
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
+
+/**
+ * Create security context
+ */
+int iavf_security_ctx_create(struct iavf_adapter *adapter);
+
+/**
+ * Initialize security context
+ */
+int iavf_security_init(struct iavf_adapter *adapter);
+
+/**
+ * Set security capabilities
+ */
+int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchl_capabilities);
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchnl_capabilities);
+
+int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+/**
+ * Destroy security context
+ */
+int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
+
+/**
+ * Verify that the inline IPsec Crypto action is valid for this device
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi);
+
+/**
+ * Add inbound security policy rule to hardware
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop);
+
+/**
+ * Delete inbound security policy rule from hardware
+ */
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id);
+
+#endif /* _IAVF_IPSEC_CRYPTO_H_ */
diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
new file mode 100644
index 0000000000..70ce8dd638
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC MAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 },
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* ChaCha20-Poly1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 16,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	}
+};
+
+
+#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index aab11720df..e55dcdf337 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -27,6 +27,7 @@
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
+#include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
@@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 uint8_t
 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
@@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
 		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
 		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
 		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
+				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
 	};
 
 	return flex_type < RTE_DIM(rxdid_map) ?
@@ -508,6 +512,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields =
 			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
+	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+		rxq->xtr_ol_flag =
+			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+		rxq->rxd_to_pkt_fields =
+			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
 	case IAVF_RXDID_COMMS_OVS_1:
 		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
 		break;
@@ -692,6 +702,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		       const struct rte_eth_txconf *tx_conf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
@@ -736,9 +748,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		struct virtchnl_vlan_supported_caps *insertion_support =
-			&vf->vlan_v2_caps.offloads.insertion_support;
+			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
 		uint32_t insertion_cap;
 
 		if (insertion_support->outer)
@@ -762,6 +774,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
+	if (iavf_ipsec_crypto_supported(adapter))
+		txq->ipsec_crypto_pkt_md_offset =
+			iavf_security_get_pkt_md_offset(adapter);
+
 	/* Allocate software ring */
 	txq->sw_ring =
 		rte_zmalloc_socket("iavf tx sw ring",
@@ -1081,6 +1097,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
 #endif
 }
 
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
+
+	mb->dynfield1[0] = desc->ipsec_said &
+			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
+}
+
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp,
+			  struct iavf_ipsec_crypto_stats *stats)
+{
+	uint16_t status1 = rte_le_to_cpu_16(rxdp->wb.status_error1);
+
+	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
+		uint16_t ipsec_status;
+
+		mb->ol_flags |= PKT_RX_SEC_OFFLOAD;
+
+		ipsec_status = status1 &
+			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
+
+
+		if (unlikely(ipsec_status !=
+			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
+			mb->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+
+			switch (ipsec_status) {
+			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
+				stats->ierrors.sad_miss++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
+				stats->ierrors.not_processed++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
+				stats->ierrors.icv_check++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
+				stats->ierrors.ipsec_length++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
+				stats->ierrors.misc++;
+				break;
+			}
+
+			stats->ierrors.count++;
+			return;
+		}
+
+		stats->icount++;
+		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
+
+		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
+			ipsec_status !=
+				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
+			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
+	}
+}
+
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
@@ -1399,6 +1479,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1541,6 +1623,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1779,6 +1863,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+				&rxq->stats.ipsec_crypto);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2091,6 +2177,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
 	*field |= cmd;
 }
 
+static inline void
+iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+{
+	uint64_t ipsec_field =
+		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+
+	*field |= ipsec_field;
+}
+
+
 static inline void
 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 		const struct rte_mbuf *m)
@@ -2123,15 +2221,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 
 static inline uint16_t
 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
-	struct rte_mbuf *m)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
 	uint64_t segmentation_field = 0;
 	uint64_t total_length = 0;
 
-	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD) {
+		total_length = ipsec_md->l4_payload_len;
+	} else {
+		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
-	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
-		total_length -= m->outer_l3_len;
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			total_length -= m->outer_l3_len;
+	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
 	if (!m->l4_len || !m->tso_segsz)
@@ -2160,7 +2262,8 @@ struct iavf_tx_context_desc_qws {
 
 static inline void
 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
-	struct rte_mbuf *m, uint16_t *tlen)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
+	uint16_t *tlen)
 {
 	volatile struct iavf_tx_context_desc_qws *desc_qws =
 			(volatile struct iavf_tx_context_desc_qws *) desc;
@@ -2172,8 +2275,13 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 
 	/* fill segmentation field */
 	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		/* fill IPsec field */
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
+				ipsec_md);
+
 		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
-				m);
+				m, ipsec_md);
 	}
 
 	/* fill tunnelling field */
@@ -2187,6 +2295,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 }
 
 
+static inline void
+iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
+	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
+{
+	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
+		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
+		((uint64_t)md->esp_trailer_len <<
+				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
+
+	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
+		((uint64_t)md->next_proto <<
+				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
+		((uint64_t)(md->len_iv & 0x3) <<
+				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
+		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+				1ULL : 0ULL) <<
+				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
+		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+
+	/**
+	 * TODO: Pre-calculate this in the Session initialization
+	 *
+	 * Calculate IPsec length required in data descriptor func when TSO
+	 * offload is enabled
+	 */
+	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
+			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+			sizeof(struct rte_udp_hdr) : 0);
+}
+
 static inline void
 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		struct rte_mbuf *m)
@@ -2301,6 +2441,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
 }
 
 
+static struct iavf_ipsec_crypto_pkt_metadata *
+iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
+		struct rte_mbuf *m)
+{
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
+				struct iavf_ipsec_crypto_pkt_metadata *);
+
+	return NULL;
+}
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -2329,7 +2480,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	for (idx = 0; idx < nb_pkts; idx++) {
 		volatile struct iavf_tx_desc *ddesc;
-		uint16_t nb_desc_ctx;
+		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
+
+		uint16_t nb_desc_ctx, nb_desc_ipsec;
 		uint16_t nb_desc_data, nb_desc_required;
 		uint16_t tlen = 0, ipseclen = 0;
 		uint64_t ddesc_template = 0;
@@ -2339,16 +2492,23 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+		/**
+		 * Get metadata for ipsec crypto from mbuf dynamic fields if
+		 * security offload is specified.
+		 */
+		ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);
+
 		nb_desc_data = mb->nb_segs;
 		nb_desc_ctx = !!(mb->ol_flags &
 			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
+		nb_desc_ipsec = !!(mb->ol_flags & PKT_TX_SEC_OFFLOAD);
 
 		/**
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx;
+		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2399,7 +2559,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txe->mbuf = NULL;
 			}
 
-			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
 			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
 			txe->last_id = desc_idx_last;
@@ -2407,7 +2567,27 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe = txn;
 			}
 
+		if (nb_desc_ipsec) {
+			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
+				(volatile struct iavf_tx_ipsec_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
+			if (txe->mbuf) {
+				rte_pktmbuf_free_seg(txe->mbuf);
+				txe->mbuf = NULL;
+			}
+
+			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
+		}
 
 		mb_seg = mb;
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 7d9058e700..754f04d734 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -25,7 +25,8 @@
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		DEV_TX_OFFLOAD_TCP_TSO |		 \
+		DEV_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
 		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
@@ -47,7 +48,7 @@
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          88
+#define IAVF_MIN_TSO_MSS          256
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
@@ -65,7 +66,8 @@
 		PKT_TX_VLAN_PKT |		 \
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
-		PKT_TX_TCP_SEG)
+		PKT_TX_TCP_SEG |		 \
+		PKT_TX_SEC_OFFLOAD)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
@@ -163,6 +165,24 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_rx_queue_stats {
+	uint64_t reserved;
+	struct iavf_ipsec_crypto_stats ipsec_crypto;
+};
+
 /* Structure associated with each Rx queue. */
 struct iavf_rx_queue {
 	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
@@ -211,6 +231,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 				/* handle flexible descriptor by RXDID */
+	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
 };
 
@@ -245,6 +266,7 @@ struct iavf_tx_queue {
 	uint64_t offloads;
 	uint16_t next_dd;              /* next to set RS, for VPMD */
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
+	uint16_t ipsec_crypto_pkt_md_offset;
 
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
@@ -347,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
 	} flex_ts;
 };
 
+/* Rx Flex Descriptor
+ * RxDID Profile ID 24 Inline IPsec
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: Inline IPsec SAID lower 16-bits
+ * Flex-field 5: Inline IPsec SAID upper 16-bits
+ */
+struct iavf_32b_rx_flex_desc_comms_ipsec {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	__le32 ipsec_said;
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -366,6 +422,7 @@ enum iavf_rxdid {
 	IAVF_RXDID_COMMS_AUX_TCP	= 21,
 	IAVF_RXDID_COMMS_OVS_1		= 22,
 	IAVF_RXDID_COMMS_OVS_2		= 23,
+	IAVF_RXDID_COMMS_IPSEC_CRYPTO	= 24,
 	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
 	IAVF_RXDID_LAST			= 63,
 };
@@ -393,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
 
 enum iavf_rx_flex_desc_status_error_1_bits {
 	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
 	/* [10:6] reserved */
 	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
 	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
@@ -405,6 +466,23 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
+
+enum iavf_rx_flex_desc_ipsec_crypto_status {
+	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
+	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
+	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
+	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
+	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
+	/* Reserved */
+	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
+};
+
+
 
 #define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
 #define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
@@ -672,6 +750,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	case IAVF_TX_DESC_DTYPE_CONTEXT:
 		name = "Tx_context_desc";
 		break;
+	case IAVF_TX_DESC_DTYPE_IPSEC:
+		name = "Tx_IPsec_desc";
+		break;
 	default:
 		name = "unknown_desc";
 		break;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 4ec438412d..10058b6beb 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1774,3 +1774,33 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 
 	return 0;
 }
+
+
+
+int
+iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
+	args.in_args = msg;
+	args.in_args_size = msg_len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 1);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+				"OP_INLINE_IPSEC_CRYPTO");
+		return err;
+	}
+
+	memcpy(resp_msg, args.out_buffer, resp_msg_len);
+
+	return 0;
+}
+
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 36a82e3faa..5eb230f687 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -5,7 +5,7 @@
 cflags += ['-Wno-strict-aliasing']
 
 includes += include_directories('../../common/iavf')
-deps += ['common_iavf']
+deps += ['common_iavf', 'security', 'cryptodev']
 
 sources = files(
         'iavf_ethdev.c',
@@ -15,6 +15,7 @@ sources = files(
         'iavf_fdir.c',
         'iavf_hash.c',
         'iavf_tm.c',
+        'iavf_ipsec_crypto.c',
 )
 
 if arch_subdir == 'x86'
diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
index 3a045040f1..7426eb9be3 100644
--- a/drivers/net/iavf/rte_pmd_iavf.h
+++ b/drivers/net/iavf/rte_pmd_iavf.h
@@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 /**
  * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map
index f3efe756cf..97f0f87311 100644
--- a/drivers/net/iavf/version.map
+++ b/drivers/net/iavf/version.map
@@ -13,4 +13,7 @@ EXPERIMENTAL {
 	rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+	# added in 21.11
+	rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v6 5/6] net/iavf: add xstats support for inline IPsec crypto
  2021-10-08 10:19 ` [dpdk-dev] [PATCH v6 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (3 preceding siblings ...)
  2021-10-08 10:20   ` [dpdk-dev] [PATCH v6 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-08 10:20   ` Radu Nicolau
  2021-10-08 10:20   ` [dpdk-dev] [PATCH v6 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-08 10:20 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add per-queue counters for maintaining statistics for inline IPsec
crypto offload. These can be retrieved through
rte_security_session_stats_get(), with more detailed error counters
exposed through the rte_ethdev xstats API.

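For illustration only (not part of this patch), an application would read
these counters through the standard ethdev xstats calls; the port id and
the name-prefix filter below are placeholders:

/* Minimal sketch: print the inline IPsec xstats of a port. */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <rte_ethdev.h>

static void
dump_inline_ipsec_xstats(uint16_t port_id)
{
	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *vals;
	int i, nb;

	if (n <= 0)
		return;

	names = calloc(n, sizeof(*names));
	vals = calloc(n, sizeof(*vals));
	if (names == NULL || vals == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, n) != n)
		goto out;

	nb = rte_eth_xstats_get(port_id, vals, n);
	if (nb < 0 || nb > n)
		goto out;

	for (i = 0; i < nb; i++) {
		const char *name = names[vals[i].id].name;

		if (strncmp(name, "inline_ipsec_crypto_", 20) == 0)
			printf("%s: %" PRIu64 "\n", name, vals[i].value);
	}
out:
	free(names);
	free(vals);
}
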
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h        | 21 ++++++++-
 drivers/net/iavf/iavf_ethdev.c | 84 ++++++++++++++++++++++++++++------
 drivers/net/iavf/iavf_rxtx.h   | 12 -----
 3 files changed, 89 insertions(+), 28 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 017b478510..ec347086b4 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -96,6 +96,25 @@ struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
 
+
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_eth_xstats {
+	struct virtchnl_eth_stats eth_stats;
+	struct iavf_ipsec_crypto_stats ips_stats;
+};
+
 /* Structure that defines a VSI, associated with a adapter. */
 struct iavf_vsi {
 	struct iavf_adapter *adapter; /* Backreference to associated adapter */
@@ -105,7 +124,7 @@ struct iavf_vsi {
 	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
 	uint16_t base_vector;
 	uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
-	struct virtchnl_eth_stats eth_stats_offset;
+	struct iavf_eth_xstats eth_stats_offset;
 };
 
 struct rte_flow;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 3c1cc1f4d5..2aeb5370a3 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -90,6 +90,7 @@ static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);
 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n);
 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -145,21 +146,37 @@ struct rte_iavf_xstats_name_off {
 	unsigned int offset;
 };
 
+#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
-	{"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
-	{"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
-	{"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
-	{"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
-	{"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
+	{"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
+	{"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
+	{"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
+	{"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
+	{"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
 	{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
 		rx_unknown_protocol)},
-	{"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
-	{"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
-	{"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
-	{"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
-	{"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
-	{"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
+	{"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
+	{"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
+	{"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
+	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
+	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
+	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+
+	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
+	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
+	{"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
+	{"inline_ipsec_crypto_ierrors_sad_lookup",
+			_OFF_OF(ips_stats.ierrors.sad_miss)},
+	{"inline_ipsec_crypto_ierrors_not_processed",
+			_OFF_OF(ips_stats.ierrors.not_processed)},
+	{"inline_ipsec_crypto_ierrors_icv_fail",
+			_OFF_OF(ips_stats.ierrors.icv_check)},
+	{"inline_ipsec_crypto_ierrors_length",
+			_OFF_OF(ips_stats.ierrors.ipsec_length)},
+	{"inline_ipsec_crypto_ierrors_misc",
+			_OFF_OF(ips_stats.ierrors.misc)},
 };
+#undef _OFF_OF
 
 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
 		sizeof(rte_iavf_stats_strings[0]))
@@ -177,7 +194,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.stats_reset                = iavf_dev_stats_reset,
 	.xstats_get                 = iavf_dev_xstats_get,
 	.xstats_get_names           = iavf_dev_xstats_get_names,
-	.xstats_reset               = iavf_dev_stats_reset,
+	.xstats_reset               = iavf_dev_xstats_reset,
 	.promiscuous_enable         = iavf_dev_promiscuous_enable,
 	.promiscuous_disable        = iavf_dev_promiscuous_disable,
 	.allmulticast_enable        = iavf_dev_allmulticast_enable,
@@ -1559,7 +1576,7 @@ iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
 static void
 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
 {
-	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
 
 	iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
 	iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
@@ -1621,7 +1638,18 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev)
 		return ret;
 
 	/* set stats offset base on current values */
-	vsi->eth_stats_offset = *pstats;
+	vsi->eth_stats_offset.eth_stats = *pstats;
+
+	return 0;
+}
+
+static int
+iavf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_dev_stats_reset(dev);
+	memset(&vf->vsi.eth_stats_offset, 0, sizeof(struct iavf_eth_xstats));
 
 	return 0;
 }
@@ -1641,6 +1669,27 @@ static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return IAVF_NB_XSTATS;
 }
 
+static void
+iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
+		struct iavf_ipsec_crypto_stats *ips)
+{
+	uint16_t idx;
+	for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
+		struct iavf_rx_queue *rxq;
+		struct iavf_ipsec_crypto_stats *stats;
+		rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
+		stats = &rxq->stats.ipsec_crypto;
+		ips->icount += stats->icount;
+		ips->ibytes += stats->ibytes;
+		ips->ierrors.count += stats->ierrors.count;
+		ips->ierrors.sad_miss += stats->ierrors.sad_miss;
+		ips->ierrors.not_processed += stats->ierrors.not_processed;
+		ips->ierrors.icv_check += stats->ierrors.icv_check;
+		ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
+		ips->ierrors.misc += stats->ierrors.misc;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1651,6 +1700,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_vsi *vsi = &vf->vsi;
 	struct virtchnl_eth_stats *pstats = NULL;
+	struct iavf_eth_xstats iavf_xtats = {0};
 
 	if (n < IAVF_NB_XSTATS)
 		return IAVF_NB_XSTATS;
@@ -1663,11 +1713,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 		return 0;
 
 	iavf_update_stats(vsi, pstats);
+	iavf_xtats.eth_stats = *pstats;
+
+	if (iavf_ipsec_crypto_supported(adapter))
+		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
-		xstats[i].value = *(uint64_t *)(((char *)pstats) +
+		xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
 			rte_iavf_stats_strings[i].offset);
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 754f04d734..377850140d 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -165,18 +165,6 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
-struct iavf_ipsec_crypto_stats {
-	uint64_t icount;
-	uint64_t ibytes;
-	struct {
-		uint64_t count;
-		uint64_t sad_miss;
-		uint64_t not_processed;
-		uint64_t icv_check;
-		uint64_t ipsec_length;
-		uint64_t misc;
-	} ierrors;
-};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v6 6/6] net/iavf: add watchdog for VFLR
  2021-10-08 10:19 ` [dpdk-dev] [PATCH v6 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (4 preceding siblings ...)
  2021-10-08 10:20   ` [dpdk-dev] [PATCH v6 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-10-08 10:20   ` Radu Nicolau
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-08 10:20 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add a watchdog to the iAVF PMD which supports monitoring the VFLR
register. If the device is not already in reset and a VF reset in
progress is detected, notify the user through a callback and move the
device into the reset state. If the device is already in reset, poll
for completion of the reset.

The watchdog is disabled by default; to enable it, set
IAVF_DEV_WATCHDOG_PERIOD to a non-zero value (in microseconds).

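For illustration only (not part of this patch), an application can pick up
the reset notification raised by the watchdog with a standard ethdev event
callback; the recovery policy shown is a placeholder:

/* Minimal sketch: react to the VFLR reset event signalled by the watchdog. */
#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static int
vf_reset_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (event == RTE_ETH_EVENT_INTR_RESET)
		printf("port %u: VF reset detected, schedule recovery\n",
			port_id);

	return 0;
}

/* at init time:
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
 *				      vf_reset_event_cb, NULL);
 */
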
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h        |  5 ++
 drivers/net/iavf/iavf_ethdev.c | 93 ++++++++++++++++++++++++++++++++++
 2 files changed, 98 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index ec347086b4..dc1bcce3f7 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -31,6 +31,8 @@
 
 #define IAVF_NUM_MACADDR_MAX      64
 
+#define IAVF_DEV_WATCHDOG_PERIOD     0
+
 #define IAVF_DEFAULT_RX_PTHRESH      8
 #define IAVF_DEFAULT_RX_HTHRESH      8
 #define IAVF_DEFAULT_RX_WTHRESH      0
@@ -216,6 +218,9 @@ struct iavf_info {
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
+	/** iAVF watchdog enable */
+	bool watchdog_enabled;
+
 	/* Event from pf */
 	bool dev_closed;
 	bool link_up;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 2aeb5370a3..3f30a074ab 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -25,6 +25,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_alarm.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
@@ -240,6 +241,90 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+
+static int
+iavf_vfr_inprogress(struct iavf_hw *hw)
+{
+	int inprogress = 0;
+
+	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
+		VIRTCHNL_VFR_INPROGRESS)
+		inprogress = 1;
+
+	if (inprogress)
+		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
+
+	return inprogress;
+}
+
+static void
+iavf_dev_watchdog(void *cb_arg)
+{
+	struct iavf_adapter *adapter = cb_arg;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	int vfr_inprogress = 0, rc = 0;
+
+	/* check if watchdog has been disabled since last call */
+	if (!adapter->vf.watchdog_enabled)
+		return;
+
+	/* If in reset then poll vfr_inprogress register for completion */
+	if (adapter->vf.vf_reset) {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (!vfr_inprogress) {
+			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+				adapter->eth_dev->data->name);
+			adapter->vf.vf_reset = false;
+		}
+	/* If not in reset then poll vfr_inprogress register for VFLR event */
+	} else {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (vfr_inprogress) {
+			PMD_DRV_LOG(INFO,
+				"VF \"%s\" reset event detected by watchdog",
+				adapter->eth_dev->data->name);
+
+			/* enter reset state with VFLR event */
+			adapter->vf.vf_reset = true;
+
+			rte_eth_dev_callback_process(adapter->eth_dev,
+				RTE_ETH_EVENT_INTR_RESET, NULL);
+		}
+	}
+
+	/* re-alarm watchdog */
+	rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, cb_arg);
+
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed to reset device watchdog alarm for VF \"%s\"",
+			adapter->eth_dev->data->name);
+}
+
+static void
+iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Enabling device watchdog");
+	adapter->vf.watchdog_enabled = true;
+	if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, (void *)adapter))
+		PMD_DRV_LOG(ERR, "Failed to enable device watchdog");
+#endif
+}
+
+static void
+iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Disabling device watchdog");
+	adapter->vf.watchdog_enabled = false;
+#endif
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
@@ -2495,6 +2580,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	iavf_default_rss_disable(adapter);
 
+
+	/* Start device watchdog */
+	iavf_dev_watchdog_enable(adapter);
+
+
 	return 0;
 
 flow_init_err:
@@ -2578,6 +2668,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
 		vf->vf_reset = false;
 
+	/* disable watchdog */
+	iavf_dev_watchdog_disable(adapter);
+
 	return ret;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v7 0/6] iavf: add iAVF IPsec inline crypto support
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (8 preceding siblings ...)
  2021-10-08 10:19 ` [dpdk-dev] [PATCH v6 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-13 15:33 ` Radu Nicolau
  2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 1/6] common/iavf: " Radu Nicolau
                     ` (5 more replies)
  2021-10-15 10:15 ` [dpdk-dev] [PATCH v8 0/7] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (6 subsequent siblings)
  16 siblings, 6 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-13 15:33 UTC (permalink / raw)
  Cc: dev, declan.doherty, abhijit.sinha, jingjing.wu, qi.z.zhang,
	beilei.xing, bruce.richardson, konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as the offload for ESP over UDP,
in conjunction with TSO for UDP and TCP flows.

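As a usage sketch only (not taken from this series), an application would
typically create an inline-crypto IPsec SA on the port's security context
roughly as below; the SPI, transform, mempools and the four-argument
rte_security_session_create() form are assumptions based on the 21.11-era
rte_security API:

/* Minimal sketch, assuming a 21.11-era rte_security API; all values are
 * placeholders and error handling is reduced to the bare minimum. */
#include <rte_crypto_sym.h>
#include <rte_ethdev.h>
#include <rte_security.h>

static struct rte_security_session *
create_inline_esp_sa(uint16_t port_id, struct rte_mempool *sess_mp,
	struct rte_mempool *priv_mp, struct rte_crypto_sym_xform *aead_xform)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1000,	/* placeholder SPI */
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
		},
		.crypto_xform = aead_xform,	/* e.g. an AES-GCM AEAD xform */
	};

	if (ctx == NULL)
		return NULL;

	return rte_security_session_create(ctx, &conf, sess_mp, priv_mp);
}
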
Depends on series "new features for ipsec and security libraries"
https://patchwork.dpdk.org/project/dpdk/list/?series=19593


Radu Nicolau (6):
  common/iavf: add iAVF IPsec inline crypto support
  net/iavf: rework tx path
  net/iavf: add support for asynchronous virt channel messages
  net/iavf: add iAVF IPsec inline crypto support
  net/iavf: add xstats support for inline IPsec crypto
  net/iavf: add watchdog for VFLR

 drivers/common/iavf/iavf_type.h               |    1 +
 drivers/common/iavf/virtchnl.h                |   17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
 drivers/net/iavf/iavf.h                       |   52 +-
 drivers/net/iavf/iavf_ethdev.c                |  219 +-
 drivers/net/iavf/iavf_generic_flow.c          |   16 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1904 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  710 ++++--
 drivers/net/iavf/iavf_rxtx.h                  |  198 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  168 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 17 files changed, 4089 insertions(+), 311 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

-- 

v2: small updates and fixes in the flow related section
v3: split the huge patch and address feedback
v4: small changes due to dependency changes
v5: updated the watchdog patch
v6: rebased and updated the common section
v7: fixed TSO issue and disabled the watchdog by default

2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v7 1/6] common/iavf: add iAVF IPsec inline crypto support
  2021-10-13 15:33 ` [dpdk-dev] [PATCH v7 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-13 15:33   ` Radu Nicolau
  2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 2/6] net/iavf: rework tx path Radu Nicolau
                     ` (4 subsequent siblings)
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-13 15:33 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec.

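For illustration only (not part of this patch), the new message layout can
be related to virtchnl_inline_ipsec_val_msg_len() with a sketch like the
one below; the buffer handling and req_id management are hypothetical:

/* Minimal sketch: assemble an INLINE_IPSEC_OP_SA_CREATE request in a
 * caller-supplied buffer, using the structures added by this patch.
 * Assumes the driver's type environment (u8/u16/u32) and <string.h>. */
static size_t
build_sa_create_msg(uint8_t *buf, const struct virtchnl_ipsec_sa_cfg *sa)
{
	struct inline_ipsec_msg *msg = (struct inline_ipsec_msg *)buf;

	msg->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
	msg->req_id = 0;	/* caller-managed request id (placeholder) */
	memcpy(msg->ipsec_data.sa_cfg, sa, sizeof(*sa));

	/* same length as virtchnl_inline_ipsec_val_msg_len() computes */
	return sizeof(*msg) + sizeof(*sa);
}
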
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/common/iavf/iavf_type.h             |   1 +
 drivers/common/iavf/virtchnl.h              |  17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h | 553 ++++++++++++++++++++
 3 files changed, 569 insertions(+), 2 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 73dfb47e70..51267ca3b3 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -723,6 +723,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 067f715945..269578f7c0 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -38,6 +38,8 @@
  * value in current and future projects
  */
 
+#include "virtchnl_inline_ipsec.h"
+
 /* Error Codes */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
@@ -133,7 +135,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, and 37 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
 	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
 	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
@@ -225,6 +228,8 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 		return "VIRTCHNL_OP_DCF_CMD_DESC";
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
@@ -385,7 +390,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
-	/* BIT(8) is reserved */
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
@@ -2291,6 +2296,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..1e9134501e
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all valid, if certain field is invalid, set 1 for all bits */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* vf record the capability of crypto from the virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF pass virtchnl_ipsec_cap to PF
+ * and PF return capability of ipsec from virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF send this SA configuration to PF using virtchnl;
+ * PF create SA as configuration and PF driver will return
+ * an unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-reply window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF send configuration of index of SA to PF
+ * PF will update SA according to configuration
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF send configuration of index of SA to PF
+ * PF will destroy SA according to configuration
+ * flag bitmap indicate all SA or just selected SA will
+ * be destroyed
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF send this SA configuration to PF using virtchnl;
+ * PF read SA and will return configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v7 2/6] net/iavf: rework tx path
  2021-10-13 15:33 ` [dpdk-dev] [PATCH v7 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 1/6] common/iavf: " Radu Nicolau
@ 2021-10-13 15:33   ` Radu Nicolau
  2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-13 15:33 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Rework the TX path and TX descriptor usage in order to
allow for better use of offload flags and to facilitate enabling of
the inline crypto offload feature.

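For illustration only (not part of this patch), the reworked path consumes
the usual mbuf offload metadata; a TSO packet would be prepared by the
application roughly as follows (header lengths and MSS are example values):

/* Minimal sketch: mbuf fields for an IPv4/TCP TSO packet that the new
 * context/data descriptor fill functions translate into hardware fields. */
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>

static void
prepare_tso_mbuf(struct rte_mbuf *m)
{
	m->l2_len = RTE_ETHER_HDR_LEN;			/* 14 byte Ethernet */
	m->l3_len = sizeof(struct rte_ipv4_hdr);	/* 20 byte IPv4 */
	m->l4_len = sizeof(struct rte_tcp_hdr);		/* 20 byte TCP */
	m->tso_segsz = 1448;				/* example MSS */
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
}
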
Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c         | 538 ++++++++++++++++-----------
 drivers/net/iavf/iavf_rxtx.h         | 117 +++++-
 drivers/net/iavf/iavf_rxtx_vec_sse.c |  10 +-
 3 files changed, 431 insertions(+), 234 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 88661e5d74..4cc05bec53 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1054,27 +1054,31 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 
 static inline void
 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
-			  volatile union iavf_rx_flex_desc *rxdp,
-			  uint8_t rx_flags)
+			  volatile union iavf_rx_flex_desc *rxdp)
 {
-	uint16_t vlan_tci = 0;
-
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
-	    rte_le_to_cpu_64(rxdp->wb.status_error0) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+		(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+		mb->vlan_tci =
+			rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	} else {
+		mb->vlan_tci = 0;
+	}
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
-	    rte_le_to_cpu_16(rxdp->wb.status_error1) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
-#endif
-
-	if (vlan_tci) {
-		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		mb->vlan_tci = vlan_tci;
+	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
+		mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+				PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+		mb->vlan_tci_outer = mb->vlan_tci;
+		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
+	} else {
+		mb->vlan_tci_outer = 0;
 	}
+#endif
 }
 
 /* Translate the rx descriptor status and error fields to pkt flags */
@@ -1394,7 +1398,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->ol_flags = 0;
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1536,7 +1540,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = 0;
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1774,7 +1778,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
-			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
+			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2068,190 +2072,302 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 	return 0;
 }
 
-/* Check if the context descriptor is needed for TX offloading */
+
+
+static inline void
+iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
+{
+	uint64_t cmd = 0;
+
+	/* TSO enabled */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;
+
+	/* Time Sync - Currently not supported */
+
+	/* Outer L2 TAG 2 Insertion - Currently not supported */
+	/* Inner L2 TAG 2 Insertion - Currently not supported */
+
+	*field |= cmd;
+}
+
+static inline void
+iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
+		const struct rte_mbuf *m)
+{
+	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
+	uint64_t eip_len = 0;
+	uint64_t eip_noinc = 0;
+	/* Default - IP_ID is increment in each segment of LSO */
+
+	switch (m->ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6 |
+			PKT_TX_OUTER_IP_CKSUM)) {
+	case PKT_TX_OUTER_IPV4:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV6:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	}
+
+	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
+		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
+		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
+}
+
 static inline uint16_t
-iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
+iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
+	struct rte_mbuf *m)
 {
-	if (flags & PKT_TX_TCP_SEG)
-		return 1;
-	if (flags & PKT_TX_VLAN_PKT &&
-	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		return 1;
-	return 0;
+	uint64_t segmentation_field = 0;
+	uint64_t total_length = 0;
+
+	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		total_length -= m->outer_l3_len;
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+	if (!m->l4_len || !m->tso_segsz)
+		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
+			 m->l4_len, m->tso_segsz);
+	if (m->tso_segsz < 88)
+		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
+			m->tso_segsz, 88);
+#endif
+	segmentation_field =
+		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
+				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
+		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
+				IAVF_TXD_CTX_QW1_MSS_MASK);
+
+	*field |= segmentation_field;
+
+	return total_length;
 }
 
+
+struct iavf_tx_context_desc_qws {
+	__le64 qw0;
+	__le64 qw1;
+};
+
 static inline void
-iavf_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union iavf_tx_offload tx_offload)
+iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
+	struct rte_mbuf *m, uint16_t *tlen)
 {
+	volatile struct iavf_tx_context_desc_qws *desc_qws =
+			(volatile struct iavf_tx_context_desc_qws *) desc;
+	/* fill descriptor type field */
+	desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
+
+	/* fill command field */
+	iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m);
+
+	/* fill segmentation field */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
+				m);
+	}
+
+	/* fill tunnelling field */
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
+	else
+		desc_qws->qw0 = 0;
+
+	desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
+	desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);
+}
+
+
+static inline void
+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
+		struct rte_mbuf *m)
+{
+	uint64_t command = 0;
+	uint64_t offset = 0;
+	uint64_t l2tag1 = 0;
+
+	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
+
+	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
+
+	/* Descriptor based VLAN insertion */
+	if (m->ol_flags & PKT_TX_VLAN_PKT) {
+		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
+		l2tag1 |= m->vlan_tci;
+	}
+
 	/* Set MACLEN */
-	*td_offset |= (tx_offload.l2_len >> 1) <<
-		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & PKT_TX_IP_CKSUM) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV4) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV6) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	}
-
-	if (ol_flags & PKT_TX_TCP_SEG) {
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
+	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L3 checksum offloading inner */
+	if (m->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV4) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV6) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	}
+
+	if (m->ol_flags & PKT_TX_TCP_SEG) {
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (m->l4_len >> 2) <<
 			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		return;
 	}
 
 	/* Enable L4 checksum offloads */
-	switch (ol_flags & PKT_TX_L4_MASK) {
+	switch (m->ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_TCP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_SCTP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_UDP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	}
+
+	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
+		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
+		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
+		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
+		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
-/* set TSO context descriptor
- * support IP -> L4 and IP -> IP -> L4
- */
-static inline uint64_t
-iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
+static inline void
+iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t value)
 {
-	uint64_t ctx_desc = 0;
-	uint32_t cd_cmd, hdr_len, cd_tso_len;
-
-	if (!tx_offload.l4_len) {
-		PMD_TX_LOG(DEBUG, "L4 length set to 0");
-		return ctx_desc;
+	*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 	}
 
-	hdr_len = tx_offload.l2_len +
-		  tx_offload.l3_len +
-		  tx_offload.l4_len;
+static inline void
+iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
+	struct rte_mbuf *m, uint64_t desc_template,
+	uint16_t tlen, uint16_t ipseclen)
+{
+	uint32_t hdrlen = m->l2_len;
+	uint32_t bufsz = 0;
 
-	cd_cmd = IAVF_TX_CTX_DESC_TSO;
-	cd_tso_len = mbuf->pkt_len - hdr_len;
-	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
-		     ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-		     ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+	/* fill data descriptor qw1 from template */
+	desc->cmd_type_offset_bsz = desc_template;
 
-	return ctx_desc;
-}
+	/* set data buffer address */
+	desc->buffer_addr = rte_mbuf_data_iova(m);
 
-/* Construct the tx flags */
-static inline uint64_t
-iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
-	       uint32_t td_tag)
-{
-	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
-				((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
-				((uint64_t)td_offset <<
-				 IAVF_TXD_QW1_OFFSET_SHIFT) |
-				((uint64_t)size  <<
-				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
-				((uint64_t)td_tag  <<
-				 IAVF_TXD_QW1_L2TAG1_SHIFT));
+	/* calculate data buffer size less set header lengths */
+	if ((m->ol_flags & PKT_TX_TUNNEL_MASK) &&
+			(m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))) {
+		hdrlen += m->outer_l3_len;
+		if (m->ol_flags & PKT_TX_L4_MASK)
+			hdrlen += m->l3_len + m->l4_len;
+		else
+			hdrlen += m->l3_len;
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			hdrlen += ipseclen;
+		bufsz = hdrlen + tlen;
+	} else {
+		bufsz = m->data_len;
+	}
+
+	/* set data buffer size */
+	desc->cmd_type_offset_bsz |=
+		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
+
+	desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
+	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
 }
 
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	volatile struct iavf_tx_desc *txd;
-	volatile struct iavf_tx_desc *txr;
-	struct iavf_tx_queue *txq;
-	struct iavf_tx_entry *sw_ring;
+	struct iavf_tx_queue *txq = tx_queue;
+	volatile struct iavf_tx_desc *txr = txq->tx_ring;
+	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
-	struct rte_mbuf *tx_pkt;
-	struct rte_mbuf *m_seg;
-	uint16_t tx_id;
-	uint16_t nb_tx;
-	uint32_t td_cmd;
-	uint32_t td_offset;
-	uint32_t td_tag;
-	uint64_t ol_flags;
-	uint16_t nb_used;
-	uint16_t nb_ctx;
-	uint16_t tx_last;
-	uint16_t slen;
-	uint64_t buf_dma_addr;
-	uint16_t cd_l2tag2 = 0;
-	union iavf_tx_offload tx_offload = {0};
-
-	txq = tx_queue;
-	sw_ring = txq->sw_ring;
-	txr = txq->tx_ring;
-	tx_id = txq->tx_tail;
-	txe = &sw_ring[tx_id];
+	struct rte_mbuf *mb, *mb_seg;
+	uint16_t desc_idx, desc_idx_last;
+	uint16_t idx;
+
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_free < txq->free_thresh)
-		(void)iavf_xmit_cleanup(txq);
+		iavf_xmit_cleanup(txq);
+
+	desc_idx = txq->tx_tail;
+	txe = &txe_ring[desc_idx];
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+		iavf_dump_tx_entry_ring(txq);
+		iavf_dump_tx_desc_ring(txq);
+#endif
+
 
-	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-		td_cmd = 0;
-		td_tag = 0;
-		td_offset = 0;
+	for (idx = 0; idx < nb_pkts; idx++) {
+		volatile struct iavf_tx_desc *ddesc;
+		uint16_t nb_desc_ctx;
+		uint16_t nb_desc_data, nb_desc_required;
+		uint16_t tlen = 0, ipseclen = 0;
+		uint64_t ddesc_template = 0;
+		uint64_t ddesc_cmd = 0;
+
+		mb = tx_pkts[idx];
 
-		tx_pkt = *tx_pkts++;
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
-		ol_flags = tx_pkt->ol_flags;
-		tx_offload.l2_len = tx_pkt->l2_len;
-		tx_offload.l3_len = tx_pkt->l3_len;
-		tx_offload.l4_len = tx_pkt->l4_len;
-		tx_offload.tso_segsz = tx_pkt->tso_segsz;
-		/* Calculate the number of context descriptors needed. */
-		nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
+		nb_desc_data = mb->nb_segs;
+		nb_desc_ctx = !!(mb->ol_flags &
+			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
 
-		/* The number of descriptors that must be allocated for
+		/**
+		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
-		 * packet plus 1 context descriptor if needed.
+		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
-		tx_last = (uint16_t)(tx_id + nb_used - 1);
+		nb_desc_required = nb_desc_data + nb_desc_ctx;
+
+		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
-		/* Circular ring */
-		if (tx_last >= txq->nb_tx_desc)
-			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+		/* wrap descriptor ring */
+		if (desc_idx_last >= txq->nb_tx_desc)
+			desc_idx_last =
+				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
 
-		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
-			   " tx_first=%u tx_last=%u",
-			   txq->port_id, txq->queue_id, tx_id, tx_last);
+		PMD_TX_LOG(DEBUG,
+			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
+			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
 
-		if (nb_used > txq->nb_free) {
+		if (nb_desc_required > txq->nb_free) {
 			if (iavf_xmit_cleanup(txq)) {
-				if (nb_tx == 0)
+				if (idx == 0)
 					return 0;
 				goto end_of_tx;
 			}
-			if (unlikely(nb_used > txq->rs_thresh)) {
-				while (nb_used > txq->nb_free) {
+			if (unlikely(nb_desc_required > txq->rs_thresh)) {
+				while (nb_desc_required > txq->nb_free) {
 					if (iavf_xmit_cleanup(txq)) {
-						if (nb_tx == 0)
+						if (idx == 0)
 							return 0;
 						goto end_of_tx;
 					}
@@ -2259,122 +2375,94 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 		}
 
-		/* Descriptor based VLAN insertion */
-		if (ol_flags & PKT_TX_VLAN_PKT &&
-		    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
-			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
-			td_tag = tx_pkt->vlan_tci;
-		}
-
-		/* According to datasheet, the bit2 is reserved and must be
-		 * set to 1.
-		 */
-		td_cmd |= 0x04;
-
-		/* Enable checksum offloading */
-		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
-			iavf_txd_enable_checksum(ol_flags, &td_cmd,
-						&td_offset, tx_offload);
+		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
 
-		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
-			uint64_t cd_type_cmd_tso_mss =
-				IAVF_TX_DESC_DTYPE_CONTEXT;
-			volatile struct iavf_tx_context_desc *ctx_txd =
+		if (nb_desc_ctx) {
+			volatile struct iavf_tx_context_desc *ctx_desc =
 				(volatile struct iavf_tx_context_desc *)
-							&txr[tx_id];
+					&txr[desc_idx];
 
 			/* clear QW0 or the previous writeback value
 			 * may impact next write
 			 */
-			*(volatile uint64_t *)ctx_txd = 0;
+			*(volatile uint64_t *)ctx_desc = 0;
 
-			txn = &sw_ring[txe->next_id];
+			txn = &txe_ring[txe->next_id];
 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
 			if (txe->mbuf) {
 				rte_pktmbuf_free_seg(txe->mbuf);
 				txe->mbuf = NULL;
 			}
 
-			/* TSO enabled */
-			if (ol_flags & PKT_TX_TCP_SEG)
-				cd_type_cmd_tso_mss |=
-					iavf_set_tso_ctx(tx_pkt, tx_offload);
+			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
-			if (ol_flags & PKT_TX_VLAN_PKT &&
-			   txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
-				cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
-					<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-				cd_l2tag2 = tx_pkt->vlan_tci;
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
 			}
 
-			ctx_txd->type_cmd_tso_mss =
-				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
-			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
 
-			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
-		}
 
-		m_seg = tx_pkt;
+		mb_seg = mb;
+
 		do {
-			txd = &txr[tx_id];
-			txn = &sw_ring[txe->next_id];
+			ddesc = (volatile struct iavf_tx_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
 			if (txe->mbuf)
 				rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = m_seg;
-
-			/* Setup TX Descriptor */
-			slen = m_seg->data_len;
-			buf_dma_addr = rte_mbuf_data_iova(m_seg);
-			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
-			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
-								  td_offset,
-								  slen,
-								  td_tag);
-
-			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
+
+			txe->mbuf = mb_seg;
+			iavf_fill_data_desc(ddesc, mb_seg,
+					ddesc_template, tlen, ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->next;
-		} while (m_seg);
+			mb_seg = mb_seg->next;
+		} while (mb_seg);
 
 		/* The last packet data descriptor needs End Of Packet (EOP) */
-		td_cmd |= IAVF_TX_DESC_CMD_EOP;
-		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
-		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
+
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
 
 		if (txq->nb_used >= txq->rs_thresh) {
 			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
 				   "%4u (port=%d queue=%d)",
-				   tx_last, txq->port_id, txq->queue_id);
+				   desc_idx_last, txq->port_id, txq->queue_id);
 
-			td_cmd |= IAVF_TX_DESC_CMD_RS;
+			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
 
 			/* Update txq RS bit counters */
 			txq->nb_used = 0;
 		}
 
-		txd->cmd_type_offset_bsz |=
-			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
-					 IAVF_TXD_QW1_CMD_SHIFT);
-		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
+				IAVF_TXD_DATA_QW1_CMD_SHIFT);
+
+		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
 	}
 
 end_of_tx:
 	rte_wmb();
 
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   txq->port_id, txq->queue_id, tx_id, nb_tx);
+		   txq->port_id, txq->queue_id, desc_idx, idx);
 
-	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
-	txq->tx_tail = tx_id;
+	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
+	txq->tx_tail = desc_idx;
 
-	return nb_tx;
+	return idx;
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index c7a868cf1d..20b6405df8 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -405,6 +405,112 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+
+#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
+#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
+#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
+#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
+	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
+	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT		(48)
+#define IAVF_TXD_DATA_QW1_L2TAG1_MASK		\
+	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
+	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
+	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT		(30)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK		\
+	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK		\
+	(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT		(50)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK		\
+	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT		(0)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_external_ip_type {
+	IAVF_TX_CTX_DESC_EIPT_NONE,
+	IAVF_TX_CTX_DESC_EIPT_IPV6,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM			(32)
+#define IAVF_TXD_CTX_QW0_L2TAG2_MASK			(0xFFFFUL)
+
+
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)
+
+/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
+#define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
+
+
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
@@ -555,9 +661,10 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	const volatile struct iavf_tx_desc *tx_desc = desc;
 	enum iavf_tx_desc_dtype_value type;
 
-	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
-		tx_desc->cmd_type_offset_bsz &
-		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+
+	type = (enum iavf_tx_desc_dtype_value)
+		rte_le_to_cpu_64(tx_desc->cmd_type_offset_bsz &
+			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
 	switch (type) {
 	case IAVF_TX_DESC_DTYPE_DATA:
 		name = "Tx_data_desc";
@@ -571,8 +678,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	}
 
 	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
-	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
-	       tx_desc->cmd_type_offset_bsz);
+		txq->queue_id, name, tx_id, tx_desc->buffer_addr,
+		tx_desc->cmd_type_offset_bsz);
 }
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index ee1e905525..288c5ca1f1 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -363,10 +363,12 @@ static inline void
 flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
 		     const uint32_t *type_table)
 {
-	const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+	const __m128i ptype_mask = _mm_set_epi16(
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
+
 	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
 	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
 	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v7 3/6] net/iavf: add support for asynchronous virt channel messages
  2021-10-13 15:33 ` [dpdk-dev] [PATCH v7 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 1/6] common/iavf: " Radu Nicolau
  2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 2/6] net/iavf: rework tx path Radu Nicolau
@ 2021-10-13 15:33   ` Radu Nicolau
  2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-13 15:33 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for asynchronous virtual channel messages, specifically for
inline IPsec messages.
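
For illustration, a minimal sketch of the completion model described above,
written as standalone C with simplified types rather than the driver's own
structures: a synchronous command waits for a single reply, while an
asynchronous inline IPsec command expects an immediate acknowledgement plus a
later response, so the pending count is primed to 2 and the waiter is only
released once it reaches zero.

#include <stdatomic.h>
#include <stdbool.h>

struct vf_cmd_state {
	atomic_int pend_cmd_count;
	bool done;
};

static void cmd_submit(struct vf_cmd_state *s, bool async)
{
	/* async commands wait for ack + response, sync commands for one reply */
	atomic_store(&s->pend_cmd_count, async ? 2 : 1);
	s->done = false;
	/* ... send the message on the admin queue ... */
}

static void cmd_reply_received(struct vf_cmd_state *s)
{
	/* called once for the ack and once more for the async response */
	if (atomic_fetch_sub(&s->pend_cmd_count, 1) == 1)
		s->done = true; /* equivalent of _notify_cmd() */
}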

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h       |  16 ++++
 drivers/net/iavf/iavf_vchnl.c | 138 +++++++++++++++++++++-------------
 2 files changed, 101 insertions(+), 53 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 940d4f79ec..49d553a51c 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -193,6 +193,7 @@ struct iavf_info {
 	uint64_t supported_rxdid;
 	uint8_t *proto_xtr; /* proto xtr type for all queues */
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+	rte_atomic32_t pend_cmd_count;
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
@@ -345,9 +346,24 @@ _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
+	rte_atomic32_set(&vf->pend_cmd_count, 1);
+
 	return !ret;
 }
 
+/* Check there is pending cmd in execution. If none, set new command. */
+static inline int
+_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+	int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+	if (!ret)
+		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+	rte_atomic32_set(&vf->pend_cmd_count, 2);
+
+	return !ret;
+}
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 3275687927..4ec438412d 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -24,8 +24,8 @@
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
-#define MAX_TRY_TIMES 200
-#define ASQ_DELAY_MS  10
+#define MAX_TRY_TIMES 2000
+#define ASQ_DELAY_MS  1
 
 static uint32_t
 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
@@ -144,7 +144,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 }
 
 static int
-iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args,
+	int async)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
@@ -156,8 +157,14 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (vf->vf_reset)
 		return -EIO;
 
-	if (_atomic_set_cmd(vf, args->ops))
-		return -1;
+
+	if (async) {
+		if (_atomic_set_async_response_cmd(vf, args->ops))
+			return -1;
+	} else {
+		if (_atomic_set_cmd(vf, args->ops))
+			return -1;
+	}
 
 	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
@@ -253,9 +260,11 @@ static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
@@ -331,18 +340,40 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
 		case iavf_aqc_opc_send_msg_to_vf:
 			if (msg_opc == VIRTCHNL_OP_EVENT) {
 				iavf_handle_pf_event_msg(dev, info.msg_buf,
-							info.msg_len);
+						info.msg_len);
 			} else {
+				/* check for inline IPsec events */
+				struct inline_ipsec_msg *imsg =
+					(struct inline_ipsec_msg *)info.msg_buf;
+				struct rte_eth_event_ipsec_desc desc;
+				if (msg_opc == VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
+					&& imsg->ipsec_opcode ==
+						INLINE_IPSEC_OP_EVENT) {
+					struct virtchnl_ipsec_event *ev =
+							imsg->ipsec_data.event;
+					desc.subtype =
+						RTE_ETH_EVENT_IPSEC_UNKNOWN;
+					desc.metadata = ev->ipsec_event_data;
+					rte_eth_dev_callback_process(dev,
+							RTE_ETH_EVENT_IPSEC,
+							&desc);
+					return;
+				}
+
 				/* read message and it's expected one */
-				if (msg_opc == vf->pend_cmd)
-					_notify_cmd(vf, msg_ret);
-				else
-					PMD_DRV_LOG(ERR, "command mismatch,"
-						    "expect %u, get %u",
-						    vf->pend_cmd, msg_opc);
+				if (msg_opc == vf->pend_cmd) {
+					rte_atomic32_dec(&vf->pend_cmd_count);
+					if (rte_atomic32_read(
+						&vf->pend_cmd_count) == 0)
+						_notify_cmd(vf, msg_ret);
+				} else {
+					PMD_DRV_LOG(ERR,
+					"command mismatch, expect %u, get %u",
+						vf->pend_cmd, msg_opc);
+				}
 				PMD_DRV_LOG(DEBUG,
-					    "adminq response is received,"
-					    " opcode = %d", msg_opc);
+				"adminq response is received, opcode = %d",
+						msg_opc);
 			}
 			break;
 		default:
@@ -366,7 +397,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_ENABLE_VLAN_STRIPPING");
@@ -387,7 +418,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_DISABLE_VLAN_STRIPPING");
@@ -416,7 +447,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
 		return err;
@@ -469,12 +500,13 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
-		VIRTCHNL_VF_OFFLOAD_QOS;
+		VIRTCHNL_VF_OFFLOAD_QOS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -519,7 +551,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
@@ -563,7 +595,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_strip);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
@@ -603,7 +635,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_insert);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
@@ -646,7 +678,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(vlan_filter);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");
@@ -667,7 +699,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
@@ -698,7 +730,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES");
@@ -726,7 +758,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES");
@@ -759,7 +791,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
@@ -801,7 +833,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
@@ -845,7 +877,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
@@ -891,7 +923,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
@@ -923,7 +955,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
@@ -955,7 +987,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
@@ -1047,7 +1079,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
@@ -1088,7 +1120,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
 
@@ -1129,7 +1161,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
 
@@ -1189,7 +1221,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		args.in_args_size = len;
 		args.out_buffer = vf->aq_resp;
 		args.out_size = IAVF_AQ_BUF_SZ;
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		if (err)
 			PMD_DRV_LOG(ERR, "fail to execute command %s",
 				    add ? "OP_ADD_ETHER_ADDRESS" :
@@ -1216,7 +1248,7 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		*pstats = NULL;
@@ -1251,7 +1283,7 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1291,7 +1323,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
@@ -1318,7 +1350,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
@@ -1345,7 +1377,7 @@ iavf_fdir_add(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
 		return err;
@@ -1405,7 +1437,7 @@ iavf_fdir_del(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
 		return err;
@@ -1452,7 +1484,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
 		return err;
@@ -1493,7 +1525,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of %s",
@@ -1516,7 +1548,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
@@ -1542,7 +1574,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_SET_RSS_HENA");
@@ -1563,7 +1595,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1596,7 +1628,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_TC_MAP");
@@ -1641,7 +1673,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 		i * sizeof(struct virtchnl_ether_addr);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
@@ -1686,11 +1718,11 @@ iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
 		 * before iavf_read_msg_from_pf.
 		 */
 		rte_intr_disable(&pci_dev->intr_handle);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_intr_enable(&pci_dev->intr_handle);
 	} else {
 		rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
 				  iavf_dev_alarm_handler, dev);
 	}
@@ -1729,7 +1761,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
 		return err;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v7 4/6] net/iavf: add iAVF IPsec inline crypto support
  2021-10-13 15:33 ` [dpdk-dev] [PATCH v7 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (2 preceding siblings ...)
  2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
@ 2021-10-13 15:33   ` Radu Nicolau
  2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
  2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-13 15:33 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Ray Kinsella
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload for
ESP over UDP, and in conjunction with TSO for UDP and TCP flows.
Implement support for rte_security packet metadata.
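
As a usage illustration (not part of this patch), once an application has
created an inline crypto session on the port's rte_security context, the
expected Tx-side pattern is to attach the session to the mbuf and mark the
packet for security offload; sec_ctx, sess and m below are placeholders
supplied by the application:

#include <rte_mbuf.h>
#include <rte_security.h>

static int
prepare_inline_ipsec_pkt(struct rte_security_ctx *sec_ctx,
			 struct rte_security_session *sess,
			 struct rte_mbuf *m)
{
	/* attach the session so the PMD can fill the IPsec Tx metadata */
	if (rte_security_set_pkt_metadata(sec_ctx, sess, m, NULL) != 0)
		return -1;

	/* request inline IPsec processing for this packet */
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	return 0;
}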

Add definitions for IPsec descriptors, and extend the data and
context descriptors to support IPsec offload.

Add support to virtual channel mailbox for IPsec Crypto request
operations. IPsec Crypto requests receive an initial acknowledgement
of receipt from the physical function driver, followed by an
asynchronous response reporting the success/failure of the request,
including any response data.
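
A sketch of how such a request is expected to flow through the helper declared
in this patch (iavf_ipsec_crypto_request), reusing the asynchronous command
path added earlier in the series; this shows the intended shape only, not the
actual function body, and assumes the driver-internal types from iavf.h:

static int
ipsec_request_sketch(struct iavf_adapter *adapter,
		     uint8_t *msg, size_t msg_len,
		     uint8_t *resp_msg, size_t resp_msg_len)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	struct iavf_cmd_info args;
	int err;

	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
	args.in_args = msg;
	args.in_args_size = msg_len;
	args.out_buffer = vf->aq_resp;
	args.out_size = IAVF_AQ_BUF_SZ;

	/* async = 1: completion is signalled only after the PF's receipt
	 * acknowledgement and the asynchronous response have both arrived
	 */
	err = iavf_execute_vf_cmd(adapter, &args, 1);
	if (err)
		return err;

	memcpy(resp_msg, args.out_buffer, resp_msg_len);
	return 0;
}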

Add enhanced descriptor debugging

Refactor the scalar Tx burst function to support integration of the offload.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h                       |   10 +
 drivers/net/iavf/iavf_ethdev.c                |   41 +-
 drivers/net/iavf/iavf_generic_flow.c          |   16 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1904 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  202 +-
 drivers/net/iavf/iavf_rxtx.h                  |   93 +-
 drivers/net/iavf/iavf_vchnl.c                 |   30 +
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 13 files changed, 2827 insertions(+), 21 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 49d553a51c..017b478510 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -221,6 +221,7 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+	struct iavf_parser_list ipsec_crypto_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
@@ -243,6 +244,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_IPV6_FLOW,
 	IAVF_PROTO_XTR_TCP,
 	IAVF_PROTO_XTR_IP_OFFSET,
+	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
 	IAVF_PROTO_XTR_MAX,
 };
 
@@ -254,11 +256,14 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 };
 
+struct iavf_security_ctx;
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
 	struct rte_eth_dev *eth_dev;
 	struct iavf_info vf;
+	struct iavf_security_ctx *security_ctx;
 
 	bool rx_bulk_alloc_allowed;
 	/* For vector PMD */
@@ -277,6 +282,8 @@ struct iavf_adapter {
 	(&((struct iavf_adapter *)adapter)->vf)
 #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct iavf_adapter *)adapter)->hw)
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+	(((struct iavf_adapter *)adapter)->security_ctx)
 
 /* IAVF_VSI_TO */
 #define IAVF_VSI_TO_HW(vsi) \
@@ -421,5 +428,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			uint16_t size);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 5a5a7f59e1..3c1cc1f4d5 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -30,6 +30,7 @@
 #include "iavf_rxtx.h"
 #include "iavf_generic_flow.h"
 #include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
@@ -71,6 +72,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
 	[IAVF_PROTO_XTR_IP_OFFSET] = {
 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
 		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+		.param = {
+		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+		.ol_flag =
+			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
 };
 
 static int iavf_dev_configure(struct rte_eth_dev *dev);
@@ -938,6 +944,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	/* free iAVF security device context and all related resources */
+	iavf_security_ctx_destroy(adapter);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -947,7 +956,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
@@ -990,6 +1001,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
 
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+	}
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -1748,6 +1764,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
 		{ "tcp",       IAVF_PROTO_XTR_TCP       },
 		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
 	};
 	uint32_t i;
 
@@ -1756,8 +1773,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 			return xtr_type_map[i].type;
 	}
 
-	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
-		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+			"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
 
 	return -1;
 }
@@ -2404,6 +2421,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		goto flow_init_err;
 	}
 
+	/** Check if the IPsec Crypto offload is supported and create
+	 *  security_ctx if it is.
+	 */
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		/* Initialize security_ctx only for primary process*/
+		ret = iavf_security_ctx_create(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+			return ret;
+		}
+
+		ret = iavf_security_init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
+			return ret;
+		}
+	}
+
 	iavf_default_rss_disable(adapter);
 
 	return 0;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index b86d99e57d..34f83c8083 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1635,6 +1635,7 @@ iavf_flow_init(struct iavf_adapter *ad)
 	TAILQ_INIT(&vf->flow_list);
 	TAILQ_INIT(&vf->rss_parser_list);
 	TAILQ_INIT(&vf->dist_parser_list);
+	TAILQ_INIT(&vf->ipsec_crypto_parser_list);
 	rte_spinlock_init(&vf->flow_ops_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
@@ -1709,6 +1710,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
+		list = &vf->ipsec_crypto_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
@@ -2018,6 +2022,14 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 
 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
 				    actions, error);
+	if (*engine)
+		return 0;
+
+	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
+			pattern, actions, error);
+	if (*engine)
+		return 0;
+
 
 	if (!*engine) {
 		rte_flow_error_set(error, EINVAL,
@@ -2064,6 +2076,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	/* Special case for inline crypto egress flows */
+	if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY)
+		goto free_flow;
+
 	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
 			&engine, iavf_parse_engine_create, error);
 	if (ret < 0) {
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 4794d1fb80..a471c0331f 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -449,6 +449,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 /* engine types. */
 enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_NONE = 0,
+	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
 	IAVF_FLOW_ENGINE_MAX,
@@ -462,6 +463,7 @@ enum iavf_flow_engine_type {
  */
 enum iavf_flow_classification_stage {
 	IAVF_FLOW_STAGE_NONE = 0,
+	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
 	IAVF_FLOW_STAGE_RSS,
 	IAVF_FLOW_STAGE_DISTRIBUTOR,
 	IAVF_FLOW_STAGE_MAX,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
new file mode 100644
index 0000000000..25624485b3
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -0,0 +1,1904 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+#include <rte_security_driver.h>
+#include <rte_security.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_log.h"
+#include "iavf_generic_flow.h"
+
+#include "iavf_ipsec_crypto.h"
+#include "iavf_ipsec_crypto_capabilities.h"
+
+/**
+ * iAVF IPsec Crypto Security Context
+ */
+struct iavf_security_ctx {
+	struct iavf_adapter *adapter;
+	int pkt_md_offset;
+	struct rte_cryptodev_capabilities *crypto_capabilities;
+};
+
+/**
+ * iAVF IPsec Crypto Security Session Parameters
+ */
+struct iavf_security_session {
+	struct iavf_adapter *adapter;
+
+	enum rte_security_ipsec_sa_mode mode;
+	enum rte_security_ipsec_tunnel_type type;
+	enum rte_security_ipsec_sa_direction direction;
+
+	struct {
+		uint32_t spi; /* Security Parameter Index */
+		uint32_t hw_idx; /* SA Index in hardware table */
+	} sa;
+
+	struct {
+		uint8_t enabled :1;
+		union {
+			uint64_t value;
+			struct {
+				uint32_t hi;
+				uint32_t low;
+			};
+		};
+	} esn;
+
+	struct {
+		uint8_t enabled :1;
+	} udp_encap;
+
+	size_t iv_sz;
+	size_t icv_sz;
+	size_t block_sz;
+
+	struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
+};
+/**
+ *  IV Length field in IPsec Tx Desc uses the following encoding:
+ *
+ *  0B - 0
+ *  4B - 1
+ *  8B - 2
+ *  16B - 3
+ *
+ * but we also need the IV Length for TSO to correctly calculate the total
+ * header length so placing it in the upper 6-bits here for easier retrieval.
+ */
+static inline uint8_t
+calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
+{
+	uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
+
+	switch (iv_sz) {
+	case 4:
+		iv_length = IAVF_IPSEC_IV_LEN_DW;
+		break;
+	case 8:
+		iv_length = IAVF_IPSEC_IV_LEN_DDW;
+		break;
+	case 16:
+		iv_length = IAVF_IPSEC_IV_LEN_QDW;
+		break;
+	}
+
+	return (iv_sz << 2) | iv_length;
+}
+
+
+static unsigned int
+iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
+{
+	return sizeof(struct iavf_security_session);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_capability(struct iavf_security_ctx *iavf_sctx,
+	uint32_t algo, uint32_t type)
+{
+	const struct rte_cryptodev_capabilities *capability;
+	int i = 0;
+
+	capability = &iavf_sctx->crypto_capabilities[i];
+
+	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+			capability->sym.xform_type == type &&
+			capability->sym.cipher.algo == algo)
+			return &capability->sym;
+		/** try next capability */
+		capability = &iavf_crypto_capabilities[i++];
+	}
+
+	return NULL;
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_auth_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
+}
+static const struct rte_cryptodev_symmetric_capability *
+get_aead_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
+}
+
+static uint16_t
+get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->auth.block_size;
+}
+
+static uint8_t
+calc_context_desc_cipherblock_sz(size_t len)
+{
+	switch (len) {
+	case 8:
+		return 0x2;
+	case 16:
+		return 0x3;
+	default:
+		return 0x0;
+	}
+}
+
+static int
+valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
+{
+	if (len < min || len > max)
+		return false;
+
+	if (increment == 0)
+		return true;
+
+	if ((len - min) % increment)
+		return false;
+
+	/* make sure it fits in the key array */
+	if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
+		return false;
+
+	return true;
+}
+
+static int
+valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_auth_xform *auth)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, auth->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(auth->key.length,
+		capability->auth.key_size.min,
+		capability->auth.key_size.max,
+		capability->auth.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_cipher_xform *cipher)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, cipher->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(cipher->key.length,
+		capability->cipher.key_size.min,
+		capability->cipher.key_size.max,
+		capability->cipher.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_aead_xform *aead)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, aead->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(aead->key.length,
+		capability->aead.key_size.min,
+		capability->aead.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
+	struct rte_security_session_conf *conf)
+{
+	/** validate security action/protocol selection */
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
+		PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate IPsec protocol selection */
+	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate selected options */
+	if (conf->ipsec.options.copy_dscp ||
+		conf->ipsec.options.copy_flabel ||
+		conf->ipsec.options.copy_df ||
+		conf->ipsec.options.dec_ttl ||
+		conf->ipsec.options.ecn ||
+		conf->ipsec.options.stats) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+		return -EINVAL;
+	}
+
+	/**
+	 * Validate crypto xforms parameters.
+	 *
+	 * AEAD transforms can be used for either inbound/outbound IPsec SAs,
+	 * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
+	 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
+	 */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_auth_xform(iavf_sctx,
+				&conf->crypto_xform->next->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->next->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_aead_xform *aead, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AEAD;
+
+	switch (aead->algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		cfg->algo_type = VIRTCHNL_AES_CCM; break;
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		cfg->algo_type = VIRTCHNL_AES_GCM; break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
+		break;
+	}
+
+	cfg->key_len = aead->key.length;
+	cfg->iv_len = aead->iv.length;
+	cfg->digest_len = aead->digest_length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, aead->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_cipher_xform *cipher, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_CIPHER;
+
+	switch (cipher->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cfg->algo_type = VIRTCHNL_AES_CBC; break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cfg->algo_type = VIRTCHNL_3DES_CBC; break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cfg->algo_type = VIRTCHNL_AES_CTR;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid cipher parameters");
+		break;
+	}
+
+	cfg->key_len = cipher->key.length;
+	cfg->iv_len = cipher->iv.length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
+}
+
+
+static void
+sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_auth_xform *auth, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AUTH;
+
+	switch (auth->algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+		cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		cfg->algo_type = VIRTCHNL_AES_CMAC; break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		cfg->algo_type = VIRTCHNL_AES_GMAC;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid auth parameters");
+		break;
+	}
+
+	cfg->key_len = auth->key.length;
+	cfg->iv_len = auth->iv.length;
+	cfg->digest_len = auth->digest_length;
+
+	memcpy(cfg->key_data, auth->key.data, cfg->key_len);
+}
+
+/**
+ * Send SA add virtual channel request to Inline IPsec driver.
+ *
+ * The inline IPsec driver expects the SPI and destination IP address to be in
+ * host order, but the DPDK APIs provide them in network order, therefore we
+ * need to byte-swap (htonl) these parameters.
+ */
+static uint32_t
+iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
+	struct rte_security_session_conf *conf)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	struct virtchnl_ipsec_sa_cfg *sa_cfg;
+	size_t request_len, response_len;
+
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg);
+
+	request = rte_malloc("iavf-sad-add-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
+	response = rte_malloc("iavf-sad-add-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set SA configuration params */
+	sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
+
+	sa_cfg->spi = conf->ipsec.spi;
+	sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
+	sa_cfg->virtchnl_direction =
+		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+			VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
+
+	if (conf->ipsec.options.esn) {
+		sa_cfg->esn_enabled = 1;
+		sa_cfg->esn_hi = conf->ipsec.esn.hi;
+		sa_cfg->esn_low = conf->ipsec.esn.low;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sa_cfg->udp_encap_enabled = 1;
+
+	/* Set outer IP params */
+	if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
+
+		*((uint32_t *)sa_cfg->dst_addr)	=
+			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
+	} else {
+		uint32_t *v6_dst_addr =
+			conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
+
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
+
+		((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
+		((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
+		((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
+		((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
+	}
+
+	/* set crypto params */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->aead, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->cipher, conf->ipsec.salt);
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->auth, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->auth, conf->ipsec.salt);
+		if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
+			sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->cipher, conf->ipsec.salt);
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sa_cfg_resp->sa_handle;
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static void
+set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
+	struct iavf_security_session *sess)
+{
+	template->sa_idx = sess->sa.hw_idx;
+
+	template->ol_flags = 0;
+
+	if (sess->udp_encap.enabled)
+		template->ol_flags |= IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
+
+	if (sess->esn.enabled)
+		template->ol_flags |= IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
+
+	template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
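+	/*
+	 * Pack the context descriptor IPsec parameters (layout inferred from
+	 * the expression below): bits [2:0] carry the cipher-block size
+	 * encoding and bits [6:3] the ICV length in 4-byte words, e.g. a
+	 * 16-byte ICV contributes (16 >> 2) << 3.
+	 */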
+	template->ctx_desc_ipsec_params =
+			calc_context_desc_cipherblock_sz(sess->block_sz) |
+			((uint8_t)(sess->icv_sz >> 2) << 3);
+}
+
+static void
+set_session_parameter(struct iavf_security_ctx *iavf_sctx,
+	struct iavf_security_session *sess,
+	struct rte_security_session_conf *conf, uint32_t sa_idx)
+{
+	sess->adapter = iavf_sctx->adapter;
+
+	sess->mode = conf->ipsec.mode;
+	sess->direction = conf->ipsec.direction;
+
+	if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		sess->type = conf->ipsec.tunnel.type;
+
+	sess->sa.spi = conf->ipsec.spi;
+	sess->sa.hw_idx = sa_idx;
+
+	if (conf->ipsec.options.esn) {
+		sess->esn.enabled = 1;
+		sess->esn.value = conf->ipsec.esn.value;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sess->udp_encap.enabled = 1;
+
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sess->block_sz = get_aead_blocksize(iavf_sctx,
+			conf->crypto_xform->aead.algo);
+		sess->iv_sz = conf->crypto_xform->aead.iv.length;
+		sess->icv_sz = conf->crypto_xform->aead.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sess->block_sz = get_cipher_blocksize(iavf_sctx,
+			conf->crypto_xform->cipher.algo);
+		sess->iv_sz = conf->crypto_xform->cipher.iv.length;
+		sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			sess->block_sz = get_auth_blocksize(iavf_sctx,
+				RTE_CRYPTO_AUTH_AES_GMAC);
+			sess->iv_sz = conf->crypto_xform->auth.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		} else {
+			sess->block_sz = get_cipher_blocksize(iavf_sctx,
+				conf->crypto_xform->next->cipher.algo);
+			sess->iv_sz =
+				conf->crypto_xform->next->cipher.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		}
+	}
+
+	set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
+}
+
+/**
+ * Create IPsec Security Association for inline IPsec Crypto offload.
+ *
+ * 1. validate session configuration parameters
+ * 2. allocate session memory from mempool
+ * 3. add SA to hardware database
+ * 4. set session parameters
+ * 5. create packet metadata template for datapath
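+ *
+ * From the application's point of view this entry point is reached through
+ * rte_security_session_create() on the ethdev security context, e.g.
+ * (illustrative sketch, not part of this patch):
+ *
+ *   struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
+ *   struct rte_security_session *sess =
+ *           rte_security_session_create(ctx, &conf, sess_mp, priv_mp);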
+ */
+static int
+iavf_ipsec_crypto_session_create(void *device,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *session,
+				 struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_session = NULL;
+	int sa_idx;
+	int ret = 0;
+
+	/* validate that all SA parameters are valid for device */
+	ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
+	if (ret)
+		return ret;
+
+	/* allocate session context */
+	if (rte_mempool_get(mempool, (void **)&iavf_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
+		return -ENOMEM;
+	}
+
+	/* add SA to hardware database */
+	sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
+	if (sa_idx < 0) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add SA (spi: %d, mode: %s, direction: %s)",
+			conf->ipsec.spi,
+			conf->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
+				"transport" : "tunnel",
+			conf->ipsec.direction ==
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+				"inbound" : "outbound");
+
+		rte_mempool_put(mempool, iavf_session);
+		return -EFAULT;
+	}
+
+	/* save data plane required session parameters */
+	set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
+
+	/* save to security session private data */
+	set_sec_session_private_data(session, iavf_session);
+
+	return 0;
+}
+
+/**
+ * Check if valid ipsec crypto action.
+ * SPI must be non-zero and SPI in session must match SPI value
+ * passed into function.
+ *
+ * returns: 0 if invalid session or SPI value equal zero
+ * returns: 1 if valid
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_session *sess = session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(sess == NULL || sess->adapter != adapter))
+		return false;
+
+	/* SPI value must be non-zero */
+	if (spi == 0)
+		return false;
+
+	/* Session SPI must match flow SPI */
+	if (sess->sa.spi == spi)
+		return true;
+
+	/**
+	 * TODO: We should add a way of tracking valid hw SA indices to
+	 * make validation less brittle
+	 */
+	return true;
+}
+
+
+/**
+ * Send virtual channel security policy add request to IES driver.
+ *
+ * The IES driver expects the SPI and destination IP address to be in host
+ * order, but the DPDK APIs provide them in network order, therefore we need
+ * to byte-swap (htonl) these parameters.
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg);
+	request = rte_malloc("iavf-inbound-security-policy-add-request",
+				request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* ESP SPI */
+	request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
+
+	/* Destination IP  */
+	if (is_v4) {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
+		request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
+	} else {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+		request->ipsec_data.sp_cfg->dip[0] =
+				htonl(((uint32_t *)v6_dst_addr)[0]);
+		request->ipsec_data.sp_cfg->dip[1] =
+				htonl(((uint32_t *)v6_dst_addr)[1]);
+		request->ipsec_data.sp_cfg->dip[2] =
+				htonl(((uint32_t *)v6_dst_addr)[2]);
+		request->ipsec_data.sp_cfg->dip[3] =
+				htonl(((uint32_t *)v6_dst_addr)[3]);
+	}
+
+	request->ipsec_data.sp_cfg->drop = drop;
+
+	/** Traffic Class/Congestion Domain are not currently supported */
+	request->ipsec_data.sp_cfg->set_tc = 0;
+	request->ipsec_data.sp_cfg->cgd = 0;
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
+	response = rte_malloc("iavf-inbound-security-policy-add-response",
+				response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* send virtual channel request to add security policy to hardware */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sp_cfg_resp->rule_id;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_update);
+	request = rte_malloc("iavf-sa-update-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sa-update-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set request params */
+	request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
+	request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
+
+	/* send virtual channel request to update SA in hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_resp->resp;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_update(void *device,
+		struct rte_security_session *session,
+		struct rte_security_session_conf *conf)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int rc = 0;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* update esn hi 32-bits */
+	if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
+		/**
+		 * Update ESN in hardware for inbound SA. Store in
+		 * iavf_security_session for outbound SA for use
+		 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
+		 */
+		if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+			rc = iavf_ipsec_crypto_sa_update_esn(adapter,
+					iavf_sess);
+		else
+			iavf_sess->esn.hi = conf->ipsec.esn.hi;
+	}
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
+		struct rte_security_session *session __rte_unused,
+		struct rte_security_stats *stats __rte_unused)
+{
+	return -EOPNOTSUPP;
+}
+
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_destroy);
+	request = rte_malloc("iavf-sp-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sp-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set security policy params */
+	request->ipsec_data.sp_destroy->table_id = is_v4 ?
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+	request->ipsec_data.sp_destroy->rule_id = flow_id;
+
+	/* send virtual channel request to delete security policy */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_status->status;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_destroy);
+
+	request = rte_malloc("iavf-sa-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+
+	response = rte_malloc("iavf-sa-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/**
+	 * SA delete supports deletion of 1-8 specified SAs or, if the flag
+	 * field is zero, all SAs associated with the VF will be deleted.
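+	 * For example (illustrative), a flag of 0x3 requests deletion of the
+	 * SAs given in sa_index[0] and sa_index[1].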
+	 */
+	if (sess) {
+		request->ipsec_data.sa_destroy->flag = 0x1;
+		request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
+	} else {
+		request->ipsec_data.sa_destroy->flag = 0x0;
+	}
+
+	/* send virtual channel request to delete SA from hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+
+	/**
+	 * The delete status will be the same bitmask as the sa_destroy request
+	 * flag if the deletion was successful.
+	 */
+	if (request->ipsec_data.sa_destroy->flag !=
+			response->ipsec_data.ipsec_status->status)
+		rc = -EFAULT;
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+
+static int
+iavf_ipsec_crypto_session_destroy(void *device,
+		struct rte_security_session *session)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int ret;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
+	rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
+	return ret;
+}
+
+/**
+ * Get the ESP trailer from the packet as well as calculate the total ESP
+ * trailer length, which includes the padding, the ESP trailer footer and the
+ * ICV.
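+ *
+ * For example (illustrative), with a 4-byte pad and a 16-byte ICV the value
+ * returned in *esp_trailer_length is sizeof(struct rte_esp_tail) + 16 + 4.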
+ */
+static inline struct rte_esp_tail *
+iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t *esp_trailer_length)
+{
+	struct rte_esp_tail *esp_trailer;
+
+	uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
+	uint16_t offset = 0;
+
+	/**
+	 * The ICV will not be present in TSO packets as this is appended by
+	 * hardware during segment generation
+	 */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		length -= s->icv_sz;
+
+	*esp_trailer_length = length;
+
+	/**
+	 * Calculate offset in packet to ESP trailer header, this should be
+	 * total packet length less the size of the ESP trailer plus the ICV
+	 * length if it is present
+	 */
+	offset = rte_pktmbuf_pkt_len(m) - length;
+
+	if (m->nb_segs > 1) {
+		/* find segment which esp trailer is located */
+		while (m->data_len < offset) {
+			offset -= m->data_len;
+			m = m->next;
+		}
+	}
+
+	esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
+
+	*esp_trailer_length += esp_trailer->pad_len;
+
+	return esp_trailer;
+}
+
+
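+/*
+ * The inner L4 payload length placed in the Tx descriptor is the total packet
+ * length less all headers (outer L2/L3, optional NAT-T UDP, ESP header plus
+ * IV, inner L3/L4) and the ESP trailer. For example (illustrative figures), a
+ * 1500-byte tunnel-mode packet with a 14B L2 header, 20B outer IPv4, 8B ESP
+ * header + 8B IV, 20B inner IPv4, 20B TCP and a 22B trailer carries
+ * 1500 - 112 = 1388 bytes of L4 payload.
+ */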
+static inline uint16_t
+iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t esp_tlen)
+{
+	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
+	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
+	uint16_t ol4_len = 0;		/* UDP NATT */
+	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
+	uint16_t l4_len = 0;		/* TCP/UDP/STCP hdrs */
+	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
+
+	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ol3_len = m->outer_l3_len;
+		/**<
+		 * application provided l3len assumed to include length of
+		 * ipv4/6 hdr + ext hdrs
+		 */
+
+	if (s->udp_encap.enabled)
+		ol4_len = sizeof(struct rte_udp_hdr);
+
+	l3_len = m->l3_len;
+	l4_len = m->l4_len;
+
+	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
+			esp_hlen + l3_len + l4_len + esp_tlen);
+}
+
+
+static int
+iavf_ipsec_crypto_pkt_metadata_set(void *device,
+			 struct rte_security_session *session,
+			 struct rte_mbuf *m, void *params)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_sess = session->sess_private_data;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
+	struct rte_esp_tail *esp_tail;
+	uint64_t *sqn = params;
+	uint16_t esp_trailer_length;
+
+	/* Check that we have a valid session associated with this device */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* Get dynamic metadata location from mbuf */
+	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
+		struct iavf_ipsec_crypto_pkt_metadata *);
+
+	/* Set immutable metadata values from session template */
+	memcpy(md, &iavf_sess->pkt_metadata_template,
+		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
+
+	esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
+			&esp_trailer_length);
+
+	/* Set per packet mutable metadata values */
+	md->esp_trailer_len = esp_trailer_length;
+	md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
+				iavf_sess, esp_trailer_length);
+	md->next_proto = esp_tail->next_proto;
+
+	/* If Extended SN in use set the upper 32-bits in metadata */
+	if (iavf_sess->esn.enabled && sqn != NULL)
+		md->esn = (uint32_t)(*sqn >> 32);
+
+	return 0;
+}
+
+static int
+iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
+		struct virtchnl_ipsec_cap *capability)
+{
+	/* Perform pf-vf comms */
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg);
+
+	request = rte_malloc("iavf-device-capability-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_cap);
+	response = rte_malloc("iavf-device-capability-response",
+			response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* send virtual channel request to get device capabilities */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id) {
+		rc = -EFAULT;
+		goto update_cleanup;
+	}
+	memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+
+static const enum rte_crypto_auth_algorithm auth_maptbl[] = {
+	/* Hash Algorithm */
+	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
+	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
+	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
+	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
+	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
+	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
+	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
+	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
+	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
+	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+};
+
+static void
+update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
+		struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	capability->auth.algo = auth_maptbl[acap->algo_type];
+	capability->auth.block_size = acap->block_size;
+
+	capability->auth.key_size.min = acap->min_key_size;
+	capability->auth.key_size.max = acap->max_key_size;
+	capability->auth.key_size.increment = acap->inc_key_size;
+
+	capability->auth.digest_size.min = acap->min_digest_size;
+	capability->auth.digest_size.max = acap->max_digest_size;
+	capability->auth.digest_size.increment = acap->inc_digest_size;
+}
+
+static const enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
+	/* Cipher Algorithm */
+	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
+	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
+	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
+	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
+};
+
+
+static void
+update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+	capability->cipher.algo = cipher_maptbl[acap->algo_type];
+
+	capability->cipher.block_size = acap->block_size;
+
+	capability->cipher.key_size.min = acap->min_key_size;
+	capability->cipher.key_size.max = acap->max_key_size;
+	capability->cipher.key_size.increment = acap->inc_key_size;
+
+	capability->cipher.iv_size.min = acap->min_iv_size;
+	capability->cipher.iv_size.max = acap->max_iv_size;
+	capability->cipher.iv_size.increment = acap->inc_iv_size;
+}
+
+static const enum rte_crypto_aead_algorithm aead_maptbl[] = {
+	/* AEAD Algorithm */
+	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
+	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
+	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+};
+
+static void
+update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	capability->aead.algo = aead_maptbl[acap->algo_type];
+
+	capability->aead.block_size = acap->block_size;
+
+	capability->aead.key_size.min = acap->min_key_size;
+	capability->aead.key_size.max = acap->max_key_size;
+	capability->aead.key_size.increment = acap->inc_key_size;
+
+	capability->aead.aad_size.min = acap->min_aad_size;
+	capability->aead.aad_size.max = acap->max_aad_size;
+	capability->aead.aad_size.increment = acap->inc_aad_size;
+
+	capability->aead.iv_size.min = acap->min_iv_size;
+	capability->aead.iv_size.max = acap->max_iv_size;
+	capability->aead.iv_size.increment = acap->inc_iv_size;
+
+	capability->aead.digest_size.min = acap->min_digest_size;
+	capability->aead.digest_size.max = acap->max_digest_size;
+	capability->aead.digest_size.increment = acap->inc_digest_size;
+}
+
+
+/**
+ * Dynamically set crypto capabilities based on virtchnl IPsec
+ * capabilities structure.
+ */
+int
+iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
+{
+	struct rte_cryptodev_capabilities *capabilities;
+	int i, j, number_of_capabilities = 0, ci = 0;
+
+	/* Count the total number of crypto algorithms supported */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
+		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
+
+	/**
+	 * Allocate cryptodev capabilities structure for
+	 * *number_of_capabilities* items plus one item to null terminate the
+	 * array
+	 */
+	capabilities = rte_zmalloc("crypto_cap",
+		sizeof(struct rte_cryptodev_capabilities) *
+		(number_of_capabilities + 1), 0);
+	if (capabilities == NULL)
+		return -ENOMEM;
+
+	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
+
+	/**
+	 * Iterate over each virtchnl crypto capability by crypto type and
+	 * algorithm.
+	 */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
+		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
+			switch (vch_cap->cap[i].crypto_type) {
+			case VIRTCHNL_AUTH:
+				update_auth_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_CIPHER:
+				update_cipher_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_AEAD:
+				update_aead_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			default:
+				capabilities[ci].op =
+						RTE_CRYPTO_OP_TYPE_UNDEFINED;
+				break;
+			}
+		}
+	}
+
+	iavf_sctx->crypto_capabilities = capabilities;
+	return 0;
+}
+
+/**
+ * Get security capabilities for device
+ */
+static const struct rte_security_capability *
+iavf_ipsec_crypto_capabilities_get(void *device)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	unsigned int i;
+
+	static struct rte_security_capability iavf_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 }
+			},
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	/**
+	 * Update the security capabilities struct with the runtime discovered
+	 * crypto capabilities, except for the last element of the array, which
+	 * is the NULL termination.
+	 */
+	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
+			sizeof(iavf_security_capabilities[0])) - 1); i++) {
+		iavf_security_capabilities[i].crypto_capabilities =
+			iavf_sctx->crypto_capabilities;
+	}
+
+	return iavf_security_capabilities;
+}
+
+static struct rte_security_ops iavf_ipsec_crypto_ops = {
+	.session_get_size		= iavf_ipsec_crypto_session_size_get,
+	.session_create			= iavf_ipsec_crypto_session_create,
+	.session_update			= iavf_ipsec_crypto_session_update,
+	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
+	.session_destroy		= iavf_ipsec_crypto_session_destroy,
+	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
+	.get_userdata			= NULL,
+	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
+};
+
+int
+iavf_security_ctx_create(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx;
+
+	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
+	if (sctx == NULL)
+		return -ENOMEM;
+
+	sctx->device = adapter->eth_dev;
+	sctx->ops = &iavf_ipsec_crypto_ops;
+	sctx->sess_cnt = 0;
+
+	adapter->eth_dev->security_ctx = sctx;
+
+	if (adapter->security_ctx == NULL) {
+		adapter->security_ctx = rte_malloc("iavf_security_ctx",
+				sizeof(struct iavf_security_ctx), 0);
+		if (adapter->security_ctx == NULL)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int
+iavf_security_init(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct rte_mbuf_dynfield pkt_md_dynfield = {
+		.name = "iavf_ipsec_crypto_pkt_metadata",
+		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
+		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
+	};
+	struct virtchnl_ipsec_cap capabilities;
+	int rc;
+
+	iavf_sctx->adapter = adapter;
+
+	iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
+	if (iavf_sctx->pkt_md_offset < 0)
+		return iavf_sctx->pkt_md_offset;
+
+	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
+	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
+	if (rc)
+		return rc;
+
+	return	iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
+			&capabilities);
+}
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	return iavf_sctx->pkt_md_offset;
+}
+
+int
+iavf_security_ctx_destroy(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx  = adapter->eth_dev->security_ctx;
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	if (iavf_sctx == NULL)
+		return -ENODEV;
+
+	/* TODO: Add resources cleanup */
+
+	/* free and reset security data structures */
+	rte_free(iavf_sctx);
+	rte_free(sctx);
+
+	adapter->security_ctx = NULL;
+	adapter->eth_dev->security_ctx = NULL;
+
+	return 0;
+}
+
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
+{
+	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
+
+	/** Capability check for IPsec Crypto */
+	if (resources && (resources->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
+		return true;
+
+	return false;
+}
+
+
+#define IAVF_IPSEC_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
+enum iavf_ipsec_flow_pt_type {
+	IAVF_PATTERN_ESP = 1,
+	IAVF_PATTERN_AH,
+	IAVF_PATTERN_UDP_ESP,
+};
+enum iavf_ipsec_flow_pt_ip_ver {
+	IAVF_PATTERN_IPV4 = 1,
+	IAVF_PATTERN_IPV6,
+};
+
+#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
+#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
+#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
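+/*
+ * For example, IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4) encodes the
+ * pattern type (0x1) in the low nibble and the IP version (0x1) in the high
+ * nibble, giving 0x11; the two macros above recover each field.
+ */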
+
+static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
+	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
+};
+
+struct iavf_ipsec_flow_item {
+	uint64_t id;
+	uint8_t is_ipv4;
+	uint32_t spi;
+	struct rte_ether_hdr eth_hdr;
+	union {
+		struct rte_ipv4_hdr ipv4_hdr;
+		struct rte_ipv6_hdr ipv6_hdr;
+	};
+	struct rte_udp_hdr udp_hdr;
+};
+
+static void
+parse_eth_item(const struct rte_flow_item_eth *item,
+		struct rte_ether_hdr *eth)
+{
+	memcpy(eth->src_addr.addr_bytes,
+			item->src.addr_bytes, sizeof(eth->src_addr));
+	memcpy(eth->dst_addr.addr_bytes,
+			item->dst.addr_bytes, sizeof(eth->dst_addr));
+}
+
+static void
+parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
+		struct rte_ipv4_hdr *ipv4)
+{
+	ipv4->src_addr = item->hdr.src_addr;
+	ipv4->dst_addr = item->hdr.dst_addr;
+}
+
+static void
+parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
+		struct rte_ipv6_hdr *ipv6)
+{
+	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
+	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+}
+
+static void
+parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
+{
+	udp->dst_port = item->hdr.dst_port;
+	udp->src_port = item->hdr.src_port;
+}
+
+static int
+has_security_action(const struct rte_flow_action actions[],
+	const void **session)
+{
+	/* only {SECURITY; END} supported */
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
+		*session = actions[0].conf;
+		return true;
+	}
+	return false;
+}
+
+
+static struct iavf_ipsec_flow_item *
+iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t type)
+{
+	const void *session;
+	struct iavf_ipsec_flow_item
+		*ipsec_flow = rte_malloc("security-flow-rule",
+		sizeof(struct iavf_ipsec_flow_item), 0);
+	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
+	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
+
+	if (ipsec_flow == NULL)
+		return NULL;
+
+	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
+
+	if (pattern[0].spec)
+		parse_eth_item((const struct rte_flow_item_eth *)
+				pattern[0].spec, &ipsec_flow->eth_hdr);
+
+	switch (p_type) {
+	case IAVF_PATTERN_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[2].spec)->hdr.spi;
+		break;
+	case IAVF_PATTERN_AH:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_ah *)
+					pattern[2].spec)->spi;
+		break;
+	case IAVF_PATTERN_UDP_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		parse_udp_item((const struct rte_flow_item_udp *)
+				pattern[2].spec,
+			&ipsec_flow->udp_hdr);
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[3].spec)->hdr.spi;
+		break;
+	default:
+		goto flow_cleanup;
+	}
+
+
+	if (!has_security_action(actions, &session))
+		goto flow_cleanup;
+
+	if (!iavf_ipsec_crypto_action_valid(ethdev, session,
+			ipsec_flow->spi))
+		goto flow_cleanup;
+
+	return ipsec_flow;
+
+flow_cleanup:
+	rte_free(ipsec_flow);
+	return NULL;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser;
+
+static int
+iavf_ipsec_flow_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
+		parser = &iavf_ipsec_flow_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
+}
+
+static int
+iavf_ipsec_flow_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = meta;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	if (ipsec_flow->is_ipv4) {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			1,
+			ipsec_flow->ipv4_hdr.dst_addr,
+			NULL,
+			0);
+	} else {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			0,
+			0,
+			ipsec_flow->ipv6_hdr.dst_addr,
+			0);
+	}
+
+	if (ipsec_flow->id < 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Failed to add SA.");
+		return -rte_errno;
+	}
+
+	flow->rule = ipsec_flow;
+
+	return 0;
+}
+
+static int
+iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	iavf_ipsec_crypto_security_policy_delete(ad,
+			ipsec_flow->is_ipv4, ipsec_flow->id);
+	rte_free(ipsec_flow);
+	return 0;
+}
+
+static struct iavf_flow_engine iavf_ipsec_flow_engine = {
+	.init = iavf_ipsec_flow_init,
+	.uninit = iavf_ipsec_flow_uninit,
+	.create = iavf_ipsec_flow_create,
+	.destroy = iavf_ipsec_flow_destroy,
+	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
+};
+
+static int
+iavf_ipsec_flow_parse(struct iavf_adapter *ad,
+		       struct iavf_pattern_match_item *array,
+		       uint32_t array_len,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta,
+		       struct rte_flow_error *error)
+{
+	struct iavf_pattern_match_item *item = NULL;
+	int ret = -1;
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (item && item->meta) {
+		uint32_t type = (uint32_t)(uintptr_t)item->meta;
+		struct iavf_ipsec_flow_item *fi =
+				iavf_ipsec_flow_item_parse(ad->eth_dev,
+						pattern, actions, type);
+		if (fi && meta) {
+			*meta = fi;
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser = {
+	.engine = &iavf_ipsec_flow_engine,
+	.array = iavf_ipsec_flow_pattern,
+	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
+	.parse_pattern_action = iavf_ipsec_flow_parse,
+	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
+};
+
+RTE_INIT(iavf_ipsec_flow_engine_register)
+{
+	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
+}
+
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
new file mode 100644
index 0000000000..4e4c8798ec
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_H_
+#define _IAVF_IPSEC_CRYPTO_H_
+
+#include <rte_security.h>
+
+#include "iavf.h"
+
+
+
+struct iavf_tx_ipsec_desc {
+	union {
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le16 l4payload_length;
+			__le32 esn;
+			__le16 trailer_length;
+			u8 type:4;
+			u8 rsv:1;
+			u8 udp:1;
+			u8 ivlen:2;
+			u8 next_header;
+			__le16 ipv6_ext_hdr_length;
+			__le32 said;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
+			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
+
+/* Initialization Vector Length type */
+enum iavf_ipsec_iv_len {
+	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
+	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
+	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
+	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
+};
+
+
+/* IPsec Crypto Packet Metadata offload flags */
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
+
+/**
+ * Packet metadata structure used to hold parameters required by the iAVF
+ * transmit data path. The parameters are set per packet, using the session,
+ * by calling the rte_security_set_pkt_metadata() API.
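+ *
+ * Illustrative usage from an application (a sketch, not part of this patch):
+ *
+ *   uint64_t sqn = ...;  // full 64-bit ESN for this packet
+ *   rte_security_set_pkt_metadata(sec_ctx, session, mbuf, &sqn);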
+ */
+struct iavf_ipsec_crypto_pkt_metadata {
+	uint32_t sa_idx;                /* SA hardware index (20b/4B) */
+
+	uint8_t ol_flags;		/* flags (1B) */
+	uint8_t len_iv;			/* IV length (2b/1B) */
+	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
+	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
+
+	uint16_t l4_payload_len;	/* L4 payload length */
+	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extension headers length (5b/1B) */
+	uint8_t next_proto;		/* Next Protocol (8b/1B) */
+
+	uint32_t esn;		        /* Extended Sequence Number (32b/4B) */
+} __rte_packed;
+
+/**
+ * Check whether inline IPsec Crypto offload is supported
+ */
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
+
+/**
+ * Create security context
+ */
+int iavf_security_ctx_create(struct iavf_adapter *adapter);
+
+/**
+ * Initialize security context
+ */
+int iavf_security_init(struct iavf_adapter *adapter);
+
+/**
+ * Set security capabilities
+ */
+int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchl_capabilities);
+
+
+int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+/**
+ * Destroy security context
+ */
+int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
+
+/**
+ * Verify that the inline IPsec Crypto action is valid for this device
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi);
+
+/**
+ * Add inbound security policy rule to hardware
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop);
+
+/**
+ * Delete inbound security policy rule from hardware
+ */
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id);
+
+#endif /* _IAVF_IPSEC_CRYPTO_H_ */
diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
new file mode 100644
index 0000000000..70ce8dd638
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC MAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 },
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* ChaCha20-Poly1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 16,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	}
+};
+
+#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
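
The table above is what the PMD reports through the cryptodev/security
capability queries. As a point of reference, a cipher/auth chain that stays
within the AES CBC and SHA256 HMAC entries could be built as below; this is
an illustrative sketch only, the keys are placeholders and nothing here is
required by the driver itself:

#include <rte_crypto_sym.h>

/* Placeholder keys, sized within the advertised key ranges. */
static uint8_t cbc_key[16];
static uint8_t hmac_key[32];

static struct rte_crypto_sym_xform auth_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.key = { .data = hmac_key, .length = sizeof(hmac_key) },
		.digest_length = 32,	/* fixed at 32 in the entry above */
	},
};

static struct rte_crypto_sym_xform cipher_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.next = &auth_xform,
	.cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.key = { .data = cbc_key, .length = sizeof(cbc_key) },
		.iv = { .offset = 0, .length = 16 },	/* 16 byte IV per the entry above */
	},
};
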
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 4cc05bec53..982c1e3b6c 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -27,6 +27,7 @@
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
+#include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
@@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 uint8_t
 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
@@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
 		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
 		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
 		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
+				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
 	};
 
 	return flex_type < RTE_DIM(rxdid_map) ?
@@ -508,6 +512,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields =
 			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
+	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+		rxq->xtr_ol_flag =
+			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+		rxq->rxd_to_pkt_fields =
+			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
 	case IAVF_RXDID_COMMS_OVS_1:
 		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
 		break;
@@ -692,6 +702,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		       const struct rte_eth_txconf *tx_conf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
@@ -736,9 +748,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		struct virtchnl_vlan_supported_caps *insertion_support =
-			&vf->vlan_v2_caps.offloads.insertion_support;
+			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
 		uint32_t insertion_cap;
 
 		if (insertion_support->outer)
@@ -762,6 +774,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
+	if (iavf_ipsec_crypto_supported(adapter))
+		txq->ipsec_crypto_pkt_md_offset =
+			iavf_security_get_pkt_md_offset(adapter);
+
 	/* Allocate software ring */
 	txq->sw_ring =
 		rte_zmalloc_socket("iavf tx sw ring",
@@ -1081,6 +1097,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
 #endif
 }
 
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
+
+	mb->dynfield1[0] = desc->ipsec_said &
+			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
+}
+
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp,
+			  struct iavf_ipsec_crypto_stats *stats)
+{
+	uint16_t status1 = rte_le_to_cpu_16(rxdp->wb.status_error1);
+
+	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
+		uint16_t ipsec_status;
+
+		mb->ol_flags |= PKT_RX_SEC_OFFLOAD;
+
+		ipsec_status = status1 &
+			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
+
+		if (unlikely(ipsec_status !=
+			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
+			mb->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+
+			switch (ipsec_status) {
+			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
+				stats->ierrors.sad_miss++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
+				stats->ierrors.not_processed++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
+				stats->ierrors.icv_check++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
+				stats->ierrors.ipsec_length++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
+				stats->ierrors.misc++;
+				break;
+			}
+
+			stats->ierrors.count++;
+			return;
+		}
+
+		stats->icount++;
+		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
+
+		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
+			ipsec_status !=
+				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
+			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
+	}
+}
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
@@ -1399,6 +1479,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1541,6 +1623,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1779,6 +1863,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+				&rxq->stats.ipsec_crypto);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2091,6 +2177,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
 	*field |= cmd;
 }
 
+static inline void
+iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+{
+	uint64_t ipsec_field =
+		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+
+	*field |= ipsec_field;
+}
+
 static inline void
 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 		const struct rte_mbuf *m)
@@ -2123,15 +2221,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 
 static inline uint16_t
 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
-	struct rte_mbuf *m)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
 	uint64_t segmentation_field = 0;
 	uint64_t total_length = 0;
 
-	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD) {
+		total_length = ipsec_md->l4_payload_len;
+	} else {
+		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
-	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
-		total_length -= m->outer_l3_len;
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			total_length -= m->outer_l3_len;
+	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
 	if (!m->l4_len || !m->tso_segsz)
@@ -2160,7 +2262,8 @@ struct iavf_tx_context_desc_qws {
 
 static inline void
 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
-	struct rte_mbuf *m, uint16_t *tlen)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
+	uint16_t *tlen)
 {
 	volatile struct iavf_tx_context_desc_qws *desc_qws =
 			(volatile struct iavf_tx_context_desc_qws *) desc;
@@ -2172,8 +2275,13 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 
 	/* fill segmentation field */
 	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		/* fill IPsec field */
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
+				ipsec_md);
+
 		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
-				m);
+				m, ipsec_md);
 	}
 
 	/* fill tunnelling field */
@@ -2187,6 +2295,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 }
 
 
+static inline void
+iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
+	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
+{
+	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
+		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
+		((uint64_t)md->esp_trailer_len <<
+				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
+
+	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
+		((uint64_t)md->next_proto <<
+				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
+		((uint64_t)(md->len_iv & 0x3) <<
+				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
+		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+				1ULL : 0ULL) <<
+				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
+		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+
+	/**
+	 * TODO: Pre-calculate this in the Session initialization
+	 *
+	 * Calculate IPsec length required in data descriptor func when TSO
+	 * offload is enabled
+	 */
+	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
+			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+			sizeof(struct rte_udp_hdr) : 0);
+}
+
 static inline void
 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		struct rte_mbuf *m)
@@ -2298,6 +2438,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
 }
 
 
+static struct iavf_ipsec_crypto_pkt_metadata *
+iavf_ipsec_crypto_get_pkt_metdata(const struct iavf_tx_queue *txq,
+		struct rte_mbuf *m)
+{
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
+				struct iavf_ipsec_crypto_pkt_metadata *);
+
+	return NULL;
+}
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -2326,7 +2477,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	for (idx = 0; idx < nb_pkts; idx++) {
 		volatile struct iavf_tx_desc *ddesc;
-		uint16_t nb_desc_ctx;
+		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
+
+		uint16_t nb_desc_ctx, nb_desc_ipsec;
 		uint16_t nb_desc_data, nb_desc_required;
 		uint16_t tlen = 0, ipseclen = 0;
 		uint64_t ddesc_template = 0;
@@ -2336,16 +2489,23 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+		/**
+		 * Get metadata for ipsec crypto from mbuf dynamic fields if
+		 * security offload is specified.
+		 */
+		ipsec_md = iavf_ipsec_crypto_get_pkt_metdata(txq, mb);
+
 		nb_desc_data = mb->nb_segs;
 		nb_desc_ctx = !!(mb->ol_flags &
 			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
+		nb_desc_ipsec = !!(mb->ol_flags & PKT_TX_SEC_OFFLOAD);
 
 		/**
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx;
+		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2396,7 +2556,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txe->mbuf = NULL;
 			}
 
-			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
 			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
 			txe->last_id = desc_idx_last;
@@ -2404,7 +2564,27 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe = txn;
 			}
 
+		if (nb_desc_ipsec) {
+			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
+				(volatile struct iavf_tx_ipsec_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
+			if (txe->mbuf) {
+				rte_pktmbuf_free_seg(txe->mbuf);
+				txe->mbuf = NULL;
+			}
+
+			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
+		}
 
 		mb_seg = mb;
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 20b6405df8..fa848716d1 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -25,7 +25,8 @@
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		DEV_TX_OFFLOAD_TCP_TSO |		 \
+		DEV_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
 		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
@@ -47,7 +48,7 @@
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          88
+#define IAVF_MIN_TSO_MSS          256
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
@@ -65,7 +66,8 @@
 		PKT_TX_VLAN_PKT |		 \
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
-		PKT_TX_TCP_SEG)
+		PKT_TX_TCP_SEG |		 \
+		PKT_TX_SEC_OFFLOAD)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
@@ -163,6 +165,24 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_rx_queue_stats {
+	uint64_t reserved;
+	struct iavf_ipsec_crypto_stats ipsec_crypto;
+};
+
 /* Structure associated with each Rx queue. */
 struct iavf_rx_queue {
 	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
@@ -211,6 +231,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 				/* handle flexible descriptor by RXDID */
+	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
 };
 
@@ -245,6 +266,7 @@ struct iavf_tx_queue {
 	uint64_t offloads;
 	uint16_t next_dd;              /* next to set RS, for VPMD */
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
+	uint16_t ipsec_crypto_pkt_md_offset;
 
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
@@ -347,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
 	} flex_ts;
 };
 
+/* Rx Flex Descriptor
+ * RxDID Profile ID 24 Inline IPsec
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: Inline IPsec SAID lower 16-bits
+ * Flex-field 5: Inline IPsec SAID upper 16-bits
+ */
+struct iavf_32b_rx_flex_desc_comms_ipsec {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	__le32 ipsec_said;
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -366,6 +422,7 @@ enum iavf_rxdid {
 	IAVF_RXDID_COMMS_AUX_TCP	= 21,
 	IAVF_RXDID_COMMS_OVS_1		= 22,
 	IAVF_RXDID_COMMS_OVS_2		= 23,
+	IAVF_RXDID_COMMS_IPSEC_CRYPTO	= 24,
 	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
 	IAVF_RXDID_LAST			= 63,
 };
@@ -393,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
 
 enum iavf_rx_flex_desc_status_error_1_bits {
 	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
 	/* [10:6] reserved */
 	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
 	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
@@ -405,6 +466,23 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
+
+enum iavf_rx_flex_desc_ipsec_crypto_status {
+	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
+	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
+	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
+	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
+	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
+	/* Reserved */
+	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
+};
+
+
 
 #define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
 #define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
@@ -672,6 +750,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	case IAVF_TX_DESC_DTYPE_CONTEXT:
 		name = "Tx_context_desc";
 		break;
+	case IAVF_TX_DESC_DTYPE_IPSEC:
+		name = "Tx_IPsec_desc";
+		break;
 	default:
 		name = "unknown_desc";
 		break;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 4ec438412d..10058b6beb 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1774,3 +1774,33 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 
 	return 0;
 }
+
+int
+iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
+	args.in_args = msg;
+	args.in_args_size = msg_len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 1);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+				"OP_INLINE_IPSEC_CRYPTO");
+		return err;
+	}
+
+	memcpy(resp_msg, args.out_buffer, resp_msg_len);
+
+	return 0;
+}
+
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 36a82e3faa..5eb230f687 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -5,7 +5,7 @@
 cflags += ['-Wno-strict-aliasing']
 
 includes += include_directories('../../common/iavf')
-deps += ['common_iavf']
+deps += ['common_iavf', 'security', 'cryptodev']
 
 sources = files(
         'iavf_ethdev.c',
@@ -15,6 +15,7 @@ sources = files(
         'iavf_fdir.c',
         'iavf_hash.c',
         'iavf_tm.c',
+        'iavf_ipsec_crypto.c',
 )
 
 if arch_subdir == 'x86'
diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
index 3a045040f1..7426eb9be3 100644
--- a/drivers/net/iavf/rte_pmd_iavf.h
+++ b/drivers/net/iavf/rte_pmd_iavf.h
@@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 /**
  * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map
index f3efe756cf..97f0f87311 100644
--- a/drivers/net/iavf/version.map
+++ b/drivers/net/iavf/version.map
@@ -13,4 +13,7 @@ EXPERIMENTAL {
 	rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+	# added in 21.11
+	rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread
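
On the receive side the status translation added in this patch surfaces the
hardware verdict through the generic mbuf security flags, so an application
only has to look at PKT_RX_SEC_OFFLOAD / PKT_RX_SEC_OFFLOAD_FAILED. A minimal
sketch follows; port_id, queue_id and the burst size are assumed to be set up
elsewhere and the drop policy is an application choice, not part of the
driver:

	struct rte_mbuf *pkts[32];
	uint16_t i, nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);

	for (i = 0; i < nb_rx; i++) {
		struct rte_mbuf *m = pkts[i];

		if ((m->ol_flags & PKT_RX_SEC_OFFLOAD) &&
				(m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)) {
			/* inline IPsec processing failed for this packet */
			rte_pktmbuf_free(m);
			continue;
		}
		/* consume or forward the packet as usual */
	}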

* [dpdk-dev] [PATCH v7 5/6] net/iavf: add xstats support for inline IPsec crypto
  2021-10-13 15:33 ` [dpdk-dev] [PATCH v7 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (3 preceding siblings ...)
  2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-13 15:33   ` Radu Nicolau
  2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-13 15:33 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add per-queue counters for maintaining inline IPsec crypto offload
statistics. The counters can be retrieved through
rte_security_session_stats_get(), with more detailed error counters
exposed through the rte_ethdev xstats API.
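
For example, the new counters can be dumped with the generic xstats API; a
minimal sketch, with allocation and error checks trimmed (needs
<rte_ethdev.h>, <inttypes.h>, <stdlib.h>, <string.h> and <stdio.h>):

	int i, nb = rte_eth_xstats_get_names(port_id, NULL, 0);
	struct rte_eth_xstat_name *names = calloc(nb, sizeof(*names));
	struct rte_eth_xstat *xstats = calloc(nb, sizeof(*xstats));

	rte_eth_xstats_get_names(port_id, names, nb);
	rte_eth_xstats_get(port_id, xstats, nb);

	for (i = 0; i < nb; i++)
		if (strncmp(names[i].name, "inline_ipsec_crypto_", 20) == 0)
			printf("%s: %" PRIu64 "\n",
				names[i].name, xstats[i].value);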

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h        | 21 ++++++++-
 drivers/net/iavf/iavf_ethdev.c | 84 ++++++++++++++++++++++++++++------
 drivers/net/iavf/iavf_rxtx.h   | 12 -----
 3 files changed, 89 insertions(+), 28 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 017b478510..ec347086b4 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -96,6 +96,25 @@ struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
 
+
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_eth_xstats {
+	struct virtchnl_eth_stats eth_stats;
+	struct iavf_ipsec_crypto_stats ips_stats;
+};
+
 /* Structure that defines a VSI, associated with a adapter. */
 struct iavf_vsi {
 	struct iavf_adapter *adapter; /* Backreference to associated adapter */
@@ -105,7 +124,7 @@ struct iavf_vsi {
 	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
 	uint16_t base_vector;
 	uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
-	struct virtchnl_eth_stats eth_stats_offset;
+	struct iavf_eth_xstats eth_stats_offset;
 };
 
 struct rte_flow;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 3c1cc1f4d5..2aeb5370a3 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -90,6 +90,7 @@ static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);
 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n);
 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -145,21 +146,37 @@ struct rte_iavf_xstats_name_off {
 	unsigned int offset;
 };
 
+#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
-	{"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
-	{"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
-	{"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
-	{"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
-	{"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
+	{"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
+	{"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
+	{"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
+	{"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
+	{"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
 	{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
 		rx_unknown_protocol)},
-	{"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
-	{"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
-	{"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
-	{"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
-	{"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
-	{"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
+	{"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
+	{"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
+	{"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
+	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
+	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
+	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+
+	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
+	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
+	{"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
+	{"inline_ipsec_crypto_ierrors_sad_lookup",
+			_OFF_OF(ips_stats.ierrors.sad_miss)},
+	{"inline_ipsec_crypto_ierrors_not_processed",
+			_OFF_OF(ips_stats.ierrors.not_processed)},
+	{"inline_ipsec_crypto_ierrors_icv_fail",
+			_OFF_OF(ips_stats.ierrors.icv_check)},
+	{"inline_ipsec_crypto_ierrors_length",
+			_OFF_OF(ips_stats.ierrors.ipsec_length)},
+	{"inline_ipsec_crypto_ierrors_misc",
+			_OFF_OF(ips_stats.ierrors.misc)},
 };
+#undef _OFF_OF
 
 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
 		sizeof(rte_iavf_stats_strings[0]))
@@ -177,7 +194,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.stats_reset                = iavf_dev_stats_reset,
 	.xstats_get                 = iavf_dev_xstats_get,
 	.xstats_get_names           = iavf_dev_xstats_get_names,
-	.xstats_reset               = iavf_dev_stats_reset,
+	.xstats_reset               = iavf_dev_xstats_reset,
 	.promiscuous_enable         = iavf_dev_promiscuous_enable,
 	.promiscuous_disable        = iavf_dev_promiscuous_disable,
 	.allmulticast_enable        = iavf_dev_allmulticast_enable,
@@ -1559,7 +1576,7 @@ iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
 static void
 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
 {
-	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
 
 	iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
 	iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
@@ -1621,7 +1638,18 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev)
 		return ret;
 
 	/* set stats offset base on current values */
-	vsi->eth_stats_offset = *pstats;
+	vsi->eth_stats_offset.eth_stats = *pstats;
+
+	return 0;
+}
+
+static int
+iavf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_dev_stats_reset(dev);
+	memset(&vf->vsi.eth_stats_offset, 0, sizeof(struct iavf_eth_xstats));
 
 	return 0;
 }
@@ -1641,6 +1669,27 @@ static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return IAVF_NB_XSTATS;
 }
 
+static void
+iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
+		struct iavf_ipsec_crypto_stats *ips)
+{
+	uint16_t idx;
+	for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
+		struct iavf_rx_queue *rxq;
+		struct iavf_ipsec_crypto_stats *stats;
+		rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
+		stats = &rxq->stats.ipsec_crypto;
+		ips->icount += stats->icount;
+		ips->ibytes += stats->ibytes;
+		ips->ierrors.count += stats->ierrors.count;
+		ips->ierrors.sad_miss += stats->ierrors.sad_miss;
+		ips->ierrors.not_processed += stats->ierrors.not_processed;
+		ips->ierrors.icv_check += stats->ierrors.icv_check;
+		ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
+		ips->ierrors.misc += stats->ierrors.misc;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1651,6 +1700,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_vsi *vsi = &vf->vsi;
 	struct virtchnl_eth_stats *pstats = NULL;
+	struct iavf_eth_xstats iavf_xtats = {0};
 
 	if (n < IAVF_NB_XSTATS)
 		return IAVF_NB_XSTATS;
@@ -1663,11 +1713,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 		return 0;
 
 	iavf_update_stats(vsi, pstats);
+	iavf_xtats.eth_stats = *pstats;
+
+	if (iavf_ipsec_crypto_supported(adapter))
+		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
-		xstats[i].value = *(uint64_t *)(((char *)pstats) +
+		xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
 			rte_iavf_stats_strings[i].offset);
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index fa848716d1..c25bfa29dc 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -165,18 +165,6 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
-struct iavf_ipsec_crypto_stats {
-	uint64_t icount;
-	uint64_t ibytes;
-	struct {
-		uint64_t count;
-		uint64_t sad_miss;
-		uint64_t not_processed;
-		uint64_t icv_check;
-		uint64_t ipsec_length;
-		uint64_t misc;
-	} ierrors;
-};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v7 6/6] net/iavf: add watchdog for VFLR
  2021-10-13 15:33 ` [dpdk-dev] [PATCH v7 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (4 preceding siblings ...)
  2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-10-13 15:33   ` Radu Nicolau
  5 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-13 15:33 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add a watchdog to the iAVF PMD that monitors the VFLR register. If the
device is not already in reset and a VF reset in progress is detected,
the user is notified through a callback and the device is moved into
the reset state. If the device is already in reset, the watchdog polls
for completion of the reset.

The watchdog is disabled by default; to enable it, set
IAVF_DEV_WATCHDOG_PERIOD to a non-zero value (in microseconds).
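
When the watchdog detects a VFLR it reports it to the application as an
RTE_ETH_EVENT_INTR_RESET event, so an application that wants to react can
register a callback for that event. A minimal sketch; the handler name and
the recovery policy are application choices, not part of this patch:

	static int
	vf_reset_event_cb(uint16_t port_id, enum rte_eth_event_type type,
			void *cb_arg, void *ret_param)
	{
		RTE_SET_USED(cb_arg);
		RTE_SET_USED(ret_param);

		if (type == RTE_ETH_EVENT_INTR_RESET)
			printf("port %u: VF reset detected by watchdog\n",
				port_id);
		/* application specific recovery, e.g. stop/reconfigure/start */
		return 0;
	}

	...
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
			vf_reset_event_cb, NULL);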

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h        |  5 ++
 drivers/net/iavf/iavf_ethdev.c | 94 ++++++++++++++++++++++++++++++++++
 2 files changed, 99 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index ec347086b4..dc1bcce3f7 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -31,6 +31,8 @@
 
 #define IAVF_NUM_MACADDR_MAX      64
 
+#define IAVF_DEV_WATCHDOG_PERIOD     0
+
 #define IAVF_DEFAULT_RX_PTHRESH      8
 #define IAVF_DEFAULT_RX_HTHRESH      8
 #define IAVF_DEFAULT_RX_WTHRESH      0
@@ -216,6 +218,9 @@ struct iavf_info {
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
+	/** iAVF watchdog enable */
+	bool watchdog_enabled;
+
 	/* Event from pf */
 	bool dev_closed;
 	bool link_up;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 2aeb5370a3..b94c0f6342 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -25,6 +25,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_alarm.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
@@ -240,6 +241,91 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+__rte_unused
+static int
+iavf_vfr_inprogress(struct iavf_hw *hw)
+{
+	int inprogress = 0;
+
+	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
+		VIRTCHNL_VFR_INPROGRESS)
+		inprogress = 1;
+
+	if (inprogress)
+		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
+
+	return inprogress;
+}
+
+__rte_unused
+static void
+iavf_dev_watchdog(void *cb_arg)
+{
+	struct iavf_adapter *adapter = cb_arg;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	int vfr_inprogress = 0, rc = 0;
+
+	/* check if watchdog has been disabled since last call */
+	if (!adapter->vf.watchdog_enabled)
+		return;
+
+	/* If in reset then poll vfr_inprogress register for completion */
+	if (adapter->vf.vf_reset) {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (!vfr_inprogress) {
+			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+				adapter->eth_dev->data->name);
+			adapter->vf.vf_reset = false;
+		}
+	/* If not in reset then poll vfr_inprogress register for VFLR event */
+	} else {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (vfr_inprogress) {
+			PMD_DRV_LOG(INFO,
+				"VF \"%s\" reset event detected by watchdog",
+				adapter->eth_dev->data->name);
+
+			/* enter reset state with VFLR event */
+			adapter->vf.vf_reset = true;
+
+			rte_eth_dev_callback_process(adapter->eth_dev,
+				RTE_ETH_EVENT_INTR_RESET, NULL);
+		}
+	}
+
+	/* re-alarm watchdog */
+	rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, cb_arg);
+
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed to reset device watchdog alarm for \"%s\"",
+			adapter->eth_dev->data->name);
+}
+
+static void
+iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Enabling device watchdog");
+	adapter->vf.watchdog_enabled = true;
+	if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, (void *)adapter))
+		PMD_DRV_LOG(ERR, "Failed to enable device watchdog");
+#endif
+}
+
+static void
+iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Disabling device watchdog");
+	adapter->vf.watchdog_enabled = false;
+#endif
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
@@ -2495,6 +2581,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	iavf_default_rss_disable(adapter);
 
+
+	/* Start device watchdog */
+	iavf_dev_watchdog_enable(adapter);
+
 
 flow_init_err:
@@ -2578,6 +2669,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
 		vf->vf_reset = false;
 
+	/* disable watchdog */
+	iavf_dev_watchdog_disable(adapter);
+
 	return ret;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v8 0/7] iavf: add iAVF IPsec inline crypto support
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (9 preceding siblings ...)
  2021-10-13 15:33 ` [dpdk-dev] [PATCH v7 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-15 10:15 ` Radu Nicolau
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 1/7] common/iavf: " Radu Nicolau
                     ` (6 more replies)
  2021-10-18 10:10 ` [dpdk-dev] [PATCH v9 0/7] iavf: add iAVF IPsec " Radu Nicolau
                   ` (5 subsequent siblings)
  16 siblings, 7 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-15 10:15 UTC (permalink / raw)
  Cc: dev, declan.doherty, abhijit.sinha, jingjing.wu, qi.z.zhang,
	beilei.xing, bruce.richardson, konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as support for offloading
ESP over UDP, and in conjunction with TSO for UDP and TCP flows.

Depends on series "new features for ipsec and security libraries"
https://patchwork.dpdk.org/project/dpdk/list/?series=19658
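
For reference, the intended usage model is the standard rte_security inline
crypto flow. The sketch below creates an egress ESP tunnel session on an iavf
port and attaches it to outgoing packets; it is illustrative only, the key,
SPI, addresses and mempools are placeholders, error handling is omitted and
the session create call is shown with the two-mempool prototype current at
the time of this series:

#include <rte_ethdev.h>
#include <rte_security.h>
#include <rte_crypto_sym.h>

static uint8_t key[16];	/* placeholder AES-128-GCM key */

static struct rte_security_session *
create_esp_tunnel_session(uint16_t port_id, struct rte_mempool *sess_mp,
		struct rte_mempool *sess_priv_mp)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);

	struct rte_crypto_sym_xform aead = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key, .length = sizeof(key) },
			.iv = { .offset = 0, .length = 8 },
			.digest_length = 16,
		},
	};

	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 1000,	/* placeholder SPI */
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
			/* tunnel.ipv4 src/dst addresses set by the application */
		},
		.crypto_xform = &aead,
	};

	return rte_security_session_create(ctx, &conf, sess_mp, sess_priv_mp);
}

Before transmission each packet is then tagged for the offload:

	m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	rte_security_set_pkt_metadata(ctx, session, m, NULL);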


Radu Nicolau (7):
  common/iavf: add iAVF IPsec inline crypto support
  net/iavf: rework tx path
  net/iavf: add support for asynchronous virt channel messages
  net/iavf: add iAVF IPsec inline crypto support
  net/iavf: add xstats support for inline IPsec crypto
  net/iavf: add watchdog for VFLR
  net/iavf: update doc with inline crypto support

 doc/guides/nics/features/iavf.ini             |    2 +
 doc/guides/nics/intel_vf.rst                  |   10 +
 doc/guides/rel_notes/release_21_11.rst        |    1 +
 drivers/common/iavf/iavf_type.h               |    1 +
 drivers/common/iavf/virtchnl.h                |   17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
 drivers/net/iavf/iavf.h                       |   52 +-
 drivers/net/iavf/iavf_ethdev.c                |  219 +-
 drivers/net/iavf/iavf_generic_flow.c          |   15 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1891 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  710 +++++--
 drivers/net/iavf/iavf_rxtx.h                  |  198 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  168 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 20 files changed, 4088 insertions(+), 311 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

-- 
v2: small updates and fixes in the flow related section
v3: split the huge patch and address feedback
v4: small changes due to dependencies changes
v5: updated the watchdog patch
v6: rebased and updated the common section
v7: fixed TSO issue and disabled watchdog by default
v8: rebased to next-net-intel and added doc updates

2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v8 1/7] common/iavf: add iAVF IPsec inline crypto support
  2021-10-15 10:15 ` [dpdk-dev] [PATCH v8 0/7] iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-15 10:15   ` Radu Nicolau
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 2/7] net/iavf: rework tx path Radu Nicolau
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-15 10:15 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/common/iavf/iavf_type.h             |   1 +
 drivers/common/iavf/virtchnl.h              |  17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h | 553 ++++++++++++++++++++
 3 files changed, 569 insertions(+), 2 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h
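
The virtchnl definitions below are consumed by the net/iavf patches later in
this series; as a hedged sketch of the exchange, a capability query built on
these structures looks roughly as follows (buffer sizing, the request id and
error handling are simplified, adapter is the usual struct iavf_adapter
pointer, and iavf_ipsec_crypto_request() is the helper added in the net/iavf
patch):

	struct virtchnl_ipsec_cap cap;
	struct inline_ipsec_msg *msg;
	size_t len = sizeof(*msg) + sizeof(cap);

	msg = rte_zmalloc("ipsec_get_cap", len, 0);
	msg->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
	msg->req_id = 0;

	/* request and response reuse the same buffer here for brevity */
	if (iavf_ipsec_crypto_request(adapter, (uint8_t *)msg, len,
			(uint8_t *)msg, len) == 0)
		cap = *msg->ipsec_cap;	/* capabilities returned by the PF */

	rte_free(msg);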

diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 73dfb47e70..51267ca3b3 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -723,6 +723,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 067f715945..269578f7c0 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -38,6 +38,8 @@
  * value in current and future projects
  */
 
+#include "virtchnl_inline_ipsec.h"
+
 /* Error Codes */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
@@ -133,7 +135,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, and 37 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
 	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
 	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
@@ -225,6 +228,8 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 		return "VIRTCHNL_OP_DCF_CMD_DESC";
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
@@ -385,7 +390,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
-	/* BIT(8) is reserved */
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
@@ -2291,6 +2296,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..1e9134501e
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all valid, if certain field is invalid, set 1 for all bits */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* vf record the capability of crypto from the virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF pass virtchnl_ipsec_cap to PF
+ * and PF return capability of ipsec from virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF send this SA configuration to PF using virtchnl;
+ * PF create SA as configuration and PF driver will return
+ * an unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-reply window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF send configuration of index of SA to PF
+ * PF will update SA according to configuration
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF send configuration of index of SA to PF
+ * PF will destroy SA according to configuration
+ * flag bitmap indicate all SA or just selected SA will
+ * be destroyed
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF send this SA configuration to PF using virtchnl;
+ * PF read SA and will return configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logically separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread
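
As a quick illustration of how the message framing defined in the header
above could be used on the VF side, the sketch below builds an SA-create
request with the structures from this patch; the buffer handling and the
function name are illustrative assumptions, not part of the patch itself.

#include <stdint.h>
#include <string.h>
/* assumes virtchnl_inline_ipsec.h from this patch is available */

static int
example_build_sa_create(uint8_t *buf, size_t buf_sz,
			const struct virtchnl_ipsec_sa_cfg *sa)
{
	struct inline_ipsec_msg *msg = (struct inline_ipsec_msg *)buf;
	/* header plus one virtchnl_ipsec_sa_cfg payload */
	uint16_t len =
		virtchnl_inline_ipsec_val_msg_len(INLINE_IPSEC_OP_SA_CREATE);

	if (buf_sz < len)
		return -1;

	msg->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
	msg->req_id = 0;	/* request id chosen by the caller */
	memcpy(msg->ipsec_data.sa_cfg, sa, sizeof(*sa));

	return len;	/* hand buf/len to the virtchnl send routine */
}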

* [dpdk-dev] [PATCH v8 2/7] net/iavf: rework tx path
  2021-10-15 10:15 ` [dpdk-dev] [PATCH v8 0/7] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 1/7] common/iavf: " Radu Nicolau
@ 2021-10-15 10:15   ` Radu Nicolau
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-15 10:15 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Rework the TX path and TX descriptor usage in order to
allow for better use of offload flags and to facilitate enabling of
the inline crypto offload feature.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c         | 538 ++++++++++++++++-----------
 drivers/net/iavf/iavf_rxtx.h         | 117 +++++-
 drivers/net/iavf/iavf_rxtx_vec_sse.c |  10 +-
 3 files changed, 431 insertions(+), 234 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 88661e5d74..8a73c929dc 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1054,27 +1054,31 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 
 static inline void
 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
-			  volatile union iavf_rx_flex_desc *rxdp,
-			  uint8_t rx_flags)
+			  volatile union iavf_rx_flex_desc *rxdp)
 {
-	uint16_t vlan_tci = 0;
-
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
-	    rte_le_to_cpu_64(rxdp->wb.status_error0) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+		(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+		mb->vlan_tci =
+			rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	} else {
+		mb->vlan_tci = 0;
+	}
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
-	    rte_le_to_cpu_16(rxdp->wb.status_error1) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
-#endif
-
-	if (vlan_tci) {
-		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		mb->vlan_tci = vlan_tci;
+	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
+		mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+				PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+		mb->vlan_tci_outer = mb->vlan_tci;
+		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
+	} else {
+		mb->vlan_tci_outer = 0;
 	}
+#endif
 }
 
 /* Translate the rx descriptor status and error fields to pkt flags */
@@ -1394,7 +1398,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->ol_flags = 0;
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1536,7 +1540,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = 0;
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1774,7 +1778,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
-			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
+			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2068,190 +2072,302 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 	return 0;
 }
 
-/* Check if the context descriptor is needed for TX offloading */
+
+
+static inline void
+iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
+{
+	uint64_t cmd = 0;
+
+	/* TSO enabled */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;
+
+	/* Time Sync - Currently not supported */
+
+	/* Outer L2 TAG 2 Insertion - Currently not supported */
+	/* Inner L2 TAG 2 Insertion - Currently not supported */
+
+	*field |= cmd;
+}
+
+static inline void
+iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
+		const struct rte_mbuf *m)
+{
+	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
+	uint64_t eip_len = 0;
+	uint64_t eip_noinc = 0;
+	/* Default - IP_ID is increment in each segment of LSO */
+
+	switch (m->ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6 |
+			PKT_TX_OUTER_IP_CKSUM)) {
+	case PKT_TX_OUTER_IPV4:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV6:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	}
+
+	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
+		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
+		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
+}
+
 static inline uint16_t
-iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
+iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
+	struct rte_mbuf *m)
 {
-	if (flags & PKT_TX_TCP_SEG)
-		return 1;
-	if (flags & PKT_TX_VLAN_PKT &&
-	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		return 1;
-	return 0;
+	uint64_t segmentation_field = 0;
+	uint64_t total_length = 0;
+
+	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		total_length -= m->outer_l3_len;
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+	if (!m->l4_len || !m->tso_segsz)
+		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
+			 m->l4_len, m->tso_segsz);
+	if (m->tso_segsz < 88)
+		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
+			m->tso_segsz, 88);
+#endif
+	segmentation_field =
+		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
+				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
+		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
+				IAVF_TXD_CTX_QW1_MSS_MASK);
+
+	*field |= segmentation_field;
+
+	return total_length;
 }
 
+
+struct iavf_tx_context_desc_qws {
+	__le64 qw0;
+	__le64 qw1;
+};
+
 static inline void
-iavf_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union iavf_tx_offload tx_offload)
+iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
+	struct rte_mbuf *m, uint16_t *tlen)
 {
+	volatile struct iavf_tx_context_desc_qws *desc_qws =
+			(volatile struct iavf_tx_context_desc_qws *)desc;
+	/* fill descriptor type field */
+	desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
+
+	/* fill command field */
+	iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m);
+
+	/* fill segmentation field */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
+				m);
+	}
+
+	/* fill tunnelling field */
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
+	else
+		desc_qws->qw0 = 0;
+
+	desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
+	desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);
+}
+
+
+static inline void
+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
+		struct rte_mbuf *m)
+{
+	uint64_t command = 0;
+	uint64_t offset = 0;
+	uint64_t l2tag1 = 0;
+
+	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
+
+	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
+
+	/* Descriptor based VLAN insertion */
+	if (m->ol_flags & PKT_TX_VLAN_PKT) {
+		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
+		l2tag1 |= m->vlan_tci;
+	}
+
 	/* Set MACLEN */
-	*td_offset |= (tx_offload.l2_len >> 1) <<
-		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & PKT_TX_IP_CKSUM) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV4) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV6) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	}
-
-	if (ol_flags & PKT_TX_TCP_SEG) {
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
+	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L3 checksum offloading inner */
+	if (m->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV4) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV6) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	}
+
+	if (m->ol_flags & PKT_TX_TCP_SEG) {
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (m->l4_len >> 2) <<
 			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		return;
 	}
 
 	/* Enable L4 checksum offloads */
-	switch (ol_flags & PKT_TX_L4_MASK) {
+	switch (m->ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_TCP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_SCTP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_UDP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	}
+
+	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
+		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
+		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
+		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
+		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
-/* set TSO context descriptor
- * support IP -> L4 and IP -> IP -> L4
- */
-static inline uint64_t
-iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
+static inline void
+iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t value)
 {
-	uint64_t ctx_desc = 0;
-	uint32_t cd_cmd, hdr_len, cd_tso_len;
-
-	if (!tx_offload.l4_len) {
-		PMD_TX_LOG(DEBUG, "L4 length set to 0");
-		return ctx_desc;
+	*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 	}
 
-	hdr_len = tx_offload.l2_len +
-		  tx_offload.l3_len +
-		  tx_offload.l4_len;
+static inline void
+iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
+	struct rte_mbuf *m, uint64_t desc_template,
+	uint16_t tlen, uint16_t ipseclen)
+{
+	uint32_t hdrlen = m->l2_len;
+	uint32_t bufsz = 0;
 
-	cd_cmd = IAVF_TX_CTX_DESC_TSO;
-	cd_tso_len = mbuf->pkt_len - hdr_len;
-	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
-		     ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-		     ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+	/* fill data descriptor qw1 from template */
+	desc->cmd_type_offset_bsz = desc_template;
 
-	return ctx_desc;
-}
+	/* set data buffer address */
+	desc->buffer_addr = rte_mbuf_data_iova(m);
 
-/* Construct the tx flags */
-static inline uint64_t
-iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
-	       uint32_t td_tag)
-{
-	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
-				((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
-				((uint64_t)td_offset <<
-				 IAVF_TXD_QW1_OFFSET_SHIFT) |
-				((uint64_t)size  <<
-				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
-				((uint64_t)td_tag  <<
-				 IAVF_TXD_QW1_L2TAG1_SHIFT));
+	/* calculate data buffer size less set header lengths */
+	if ((m->ol_flags & PKT_TX_TUNNEL_MASK) &&
+			(m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))) {
+		hdrlen += m->outer_l3_len;
+		if (m->ol_flags & PKT_TX_L4_MASK)
+			hdrlen += m->l3_len + m->l4_len;
+		else
+			hdrlen += m->l3_len;
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			hdrlen += ipseclen;
+		bufsz = hdrlen + tlen;
+	} else {
+		bufsz = m->data_len;
+	}
+
+	/* set data buffer size */
+	desc->cmd_type_offset_bsz |=
+		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
+
+	desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
+	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
 }
 
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	volatile struct iavf_tx_desc *txd;
-	volatile struct iavf_tx_desc *txr;
-	struct iavf_tx_queue *txq;
-	struct iavf_tx_entry *sw_ring;
+	struct iavf_tx_queue *txq = tx_queue;
+	volatile struct iavf_tx_desc *txr = txq->tx_ring;
+	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
-	struct rte_mbuf *tx_pkt;
-	struct rte_mbuf *m_seg;
-	uint16_t tx_id;
-	uint16_t nb_tx;
-	uint32_t td_cmd;
-	uint32_t td_offset;
-	uint32_t td_tag;
-	uint64_t ol_flags;
-	uint16_t nb_used;
-	uint16_t nb_ctx;
-	uint16_t tx_last;
-	uint16_t slen;
-	uint64_t buf_dma_addr;
-	uint16_t cd_l2tag2 = 0;
-	union iavf_tx_offload tx_offload = {0};
-
-	txq = tx_queue;
-	sw_ring = txq->sw_ring;
-	txr = txq->tx_ring;
-	tx_id = txq->tx_tail;
-	txe = &sw_ring[tx_id];
+	struct rte_mbuf *mb, *mb_seg;
+	uint16_t desc_idx, desc_idx_last;
+	uint16_t idx;
+
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_free < txq->free_thresh)
-		(void)iavf_xmit_cleanup(txq);
+		iavf_xmit_cleanup(txq);
+
+	desc_idx = txq->tx_tail;
+	txe = &txe_ring[desc_idx];
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+		iavf_dump_tx_entry_ring(txq);
+		iavf_dump_tx_desc_ring(txq);
+#endif
+
 
-	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-		td_cmd = 0;
-		td_tag = 0;
-		td_offset = 0;
+	for (idx = 0; idx < nb_pkts; idx++) {
+		volatile struct iavf_tx_desc *ddesc;
+		uint16_t nb_desc_ctx;
+		uint16_t nb_desc_data, nb_desc_required;
+		uint16_t tlen = 0, ipseclen = 0;
+		uint64_t ddesc_template = 0;
+		uint64_t ddesc_cmd = 0;
+
+		mb = tx_pkts[idx];
 
-		tx_pkt = *tx_pkts++;
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
-		ol_flags = tx_pkt->ol_flags;
-		tx_offload.l2_len = tx_pkt->l2_len;
-		tx_offload.l3_len = tx_pkt->l3_len;
-		tx_offload.l4_len = tx_pkt->l4_len;
-		tx_offload.tso_segsz = tx_pkt->tso_segsz;
-		/* Calculate the number of context descriptors needed. */
-		nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
+		nb_desc_data = mb->nb_segs;
+		nb_desc_ctx = !!(mb->ol_flags &
+			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
 
-		/* The number of descriptors that must be allocated for
+		/**
+		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
-		 * packet plus 1 context descriptor if needed.
+		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
-		tx_last = (uint16_t)(tx_id + nb_used - 1);
+		nb_desc_required = nb_desc_data + nb_desc_ctx;
+
+		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
-		/* Circular ring */
-		if (tx_last >= txq->nb_tx_desc)
-			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+		/* wrap descriptor ring */
+		if (desc_idx_last >= txq->nb_tx_desc)
+			desc_idx_last =
+				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
 
-		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
-			   " tx_first=%u tx_last=%u",
-			   txq->port_id, txq->queue_id, tx_id, tx_last);
+		PMD_TX_LOG(DEBUG,
+			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
+			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
 
-		if (nb_used > txq->nb_free) {
+		if (nb_desc_required > txq->nb_free) {
 			if (iavf_xmit_cleanup(txq)) {
-				if (nb_tx == 0)
+				if (idx == 0)
 					return 0;
 				goto end_of_tx;
 			}
-			if (unlikely(nb_used > txq->rs_thresh)) {
-				while (nb_used > txq->nb_free) {
+			if (unlikely(nb_desc_required > txq->rs_thresh)) {
+				while (nb_desc_required > txq->nb_free) {
 					if (iavf_xmit_cleanup(txq)) {
-						if (nb_tx == 0)
+						if (idx == 0)
 							return 0;
 						goto end_of_tx;
 					}
@@ -2259,122 +2375,94 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 		}
 
-		/* Descriptor based VLAN insertion */
-		if (ol_flags & PKT_TX_VLAN_PKT &&
-		    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
-			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
-			td_tag = tx_pkt->vlan_tci;
-		}
-
-		/* According to datasheet, the bit2 is reserved and must be
-		 * set to 1.
-		 */
-		td_cmd |= 0x04;
-
-		/* Enable checksum offloading */
-		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
-			iavf_txd_enable_checksum(ol_flags, &td_cmd,
-						&td_offset, tx_offload);
+		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
 
-		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
-			uint64_t cd_type_cmd_tso_mss =
-				IAVF_TX_DESC_DTYPE_CONTEXT;
-			volatile struct iavf_tx_context_desc *ctx_txd =
+		if (nb_desc_ctx) {
+			volatile struct iavf_tx_context_desc *ctx_desc =
 				(volatile struct iavf_tx_context_desc *)
-							&txr[tx_id];
+					&txr[desc_idx];
 
 			/* clear QW0 or the previous writeback value
 			 * may impact next write
 			 */
-			*(volatile uint64_t *)ctx_txd = 0;
+			*(volatile uint64_t *)ctx_desc = 0;
 
-			txn = &sw_ring[txe->next_id];
+			txn = &txe_ring[txe->next_id];
 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
 			if (txe->mbuf) {
 				rte_pktmbuf_free_seg(txe->mbuf);
 				txe->mbuf = NULL;
 			}
 
-			/* TSO enabled */
-			if (ol_flags & PKT_TX_TCP_SEG)
-				cd_type_cmd_tso_mss |=
-					iavf_set_tso_ctx(tx_pkt, tx_offload);
+			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
-			if (ol_flags & PKT_TX_VLAN_PKT &&
-			   txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
-				cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
-					<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-				cd_l2tag2 = tx_pkt->vlan_tci;
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
 			}
 
-			ctx_txd->type_cmd_tso_mss =
-				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
-			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
 
-			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
-		}
 
-		m_seg = tx_pkt;
+		mb_seg = mb;
+
 		do {
-			txd = &txr[tx_id];
-			txn = &sw_ring[txe->next_id];
+			ddesc = (volatile struct iavf_tx_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
 			if (txe->mbuf)
 				rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = m_seg;
-
-			/* Setup TX Descriptor */
-			slen = m_seg->data_len;
-			buf_dma_addr = rte_mbuf_data_iova(m_seg);
-			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
-			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
-								  td_offset,
-								  slen,
-								  td_tag);
-
-			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
+
+			txe->mbuf = mb_seg;
+			iavf_fill_data_desc(ddesc, mb_seg,
+					ddesc_template, tlen, ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->next;
-		} while (m_seg);
+			mb_seg = mb_seg->next;
+		} while (mb_seg);
 
 		/* The last packet data descriptor needs End Of Packet (EOP) */
-		td_cmd |= IAVF_TX_DESC_CMD_EOP;
-		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
-		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
+
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
 
 		if (txq->nb_used >= txq->rs_thresh) {
 			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
 				   "%4u (port=%d queue=%d)",
-				   tx_last, txq->port_id, txq->queue_id);
+				   desc_idx_last, txq->port_id, txq->queue_id);
 
-			td_cmd |= IAVF_TX_DESC_CMD_RS;
+			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
 
 			/* Update txq RS bit counters */
 			txq->nb_used = 0;
 		}
 
-		txd->cmd_type_offset_bsz |=
-			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
-					 IAVF_TXD_QW1_CMD_SHIFT);
-		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
+				IAVF_TXD_DATA_QW1_CMD_SHIFT);
+
+		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
 	}
 
 end_of_tx:
 	rte_wmb();
 
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   txq->port_id, txq->queue_id, tx_id, nb_tx);
+		   txq->port_id, txq->queue_id, desc_idx, idx);
 
-	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
-	txq->tx_tail = tx_id;
+	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
+	txq->tx_tail = desc_idx;
 
-	return nb_tx;
+	return idx;
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 9591e45cb0..d8a62e2667 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -405,6 +405,112 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+
+#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
+#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
+#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
+#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
+	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
+	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT		(48)
+#define IAVF_TXD_DATA_QW1_L2TAG1_MASK		\
+	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
+	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
+	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT		(30)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK		\
+	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK		\
+	(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT		(50)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK		\
+	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT		(0)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_external_ip_type {
+	IAVF_TX_CTX_DESC_EIPT_NONE,
+	IAVF_TX_CTX_DESC_EIPT_IPV6,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM			(32)
+#define IAVF_TXD_CTX_QW0_L2TAG2_MASK			(0xFFFFUL)
+
+
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)
+
+/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
+#define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
+
+
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
@@ -555,9 +661,10 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	const volatile struct iavf_tx_desc *tx_desc = desc;
 	enum iavf_tx_desc_dtype_value type;
 
-	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
-		tx_desc->cmd_type_offset_bsz &
-		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+
+	type = (enum iavf_tx_desc_dtype_value)
+		rte_le_to_cpu_64(tx_desc->cmd_type_offset_bsz &
+			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
 	switch (type) {
 	case IAVF_TX_DESC_DTYPE_DATA:
 		name = "Tx_data_desc";
@@ -571,8 +678,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	}
 
 	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
-	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
-	       tx_desc->cmd_type_offset_bsz);
+		txq->queue_id, name, tx_id, tx_desc->buffer_addr,
+		tx_desc->cmd_type_offset_bsz);
 }
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index edb54991e2..2c3bb0b05f 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -363,10 +363,12 @@ static inline void
 flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
 		     const uint32_t *type_table)
 {
-	const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+	const __m128i ptype_mask = _mm_set_epi16(
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
+
 	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
 	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
 	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v8 3/7] net/iavf: add support for asynchronous virt channel messages
  2021-10-15 10:15 ` [dpdk-dev] [PATCH v8 0/7] iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 1/7] common/iavf: " Radu Nicolau
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 2/7] net/iavf: rework tx path Radu Nicolau
@ 2021-10-15 10:15   ` Radu Nicolau
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-15 10:15 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for asynchronous virtual channel messages, specifically for
inline IPsec messages.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h       |  16 ++++
 drivers/net/iavf/iavf_vchnl.c | 138 +++++++++++++++++++++-------------
 2 files changed, 101 insertions(+), 53 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 34bfa9af47..67051f29a8 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -193,6 +193,7 @@ struct iavf_info {
 	uint64_t supported_rxdid;
 	uint8_t *proto_xtr; /* proto xtr type for all queues */
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+	rte_atomic32_t pend_cmd_count;
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
@@ -345,9 +346,24 @@ _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
+	rte_atomic32_set(&vf->pend_cmd_count, 1);
+
 	return !ret;
 }
 
+/* Check there is pending cmd in execution. If none, set new command. */
+static inline int
+_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+	int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+	if (!ret)
+		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+	rte_atomic32_set(&vf->pend_cmd_count, 2);
+
+	return !ret;
+}
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 0f4dd21d44..da4654957a 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -24,8 +24,8 @@
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
-#define MAX_TRY_TIMES 200
-#define ASQ_DELAY_MS  10
+#define MAX_TRY_TIMES 2000
+#define ASQ_DELAY_MS  1
 
 static uint32_t
 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
@@ -143,7 +143,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 }
 
 static int
-iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args,
+	int async)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
@@ -155,8 +156,14 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (vf->vf_reset)
 		return -EIO;
 
-	if (_atomic_set_cmd(vf, args->ops))
-		return -1;
+
+	if (async) {
+		if (_atomic_set_async_response_cmd(vf, args->ops))
+			return -1;
+	} else {
+		if (_atomic_set_cmd(vf, args->ops))
+			return -1;
+	}
 
 	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
@@ -252,9 +259,11 @@ static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
@@ -330,18 +339,40 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
 		case iavf_aqc_opc_send_msg_to_vf:
 			if (msg_opc == VIRTCHNL_OP_EVENT) {
 				iavf_handle_pf_event_msg(dev, info.msg_buf,
-							info.msg_len);
+						info.msg_len);
 			} else {
+				/* check for inline IPsec events */
+				struct inline_ipsec_msg *imsg =
+					(struct inline_ipsec_msg *)info.msg_buf;
+				struct rte_eth_event_ipsec_desc desc;
+				if (msg_opc == VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
+					&& imsg->ipsec_opcode ==
+						INLINE_IPSEC_OP_EVENT) {
+					struct virtchnl_ipsec_event *ev =
+							imsg->ipsec_data.event;
+					desc.subtype =
+						RTE_ETH_EVENT_IPSEC_UNKNOWN;
+					desc.metadata = ev->ipsec_event_data;
+					rte_eth_dev_callback_process(dev,
+							RTE_ETH_EVENT_IPSEC,
+							&desc);
+					return;
+				}
+
 				/* read message and it's expected one */
-				if (msg_opc == vf->pend_cmd)
-					_notify_cmd(vf, msg_ret);
-				else
-					PMD_DRV_LOG(ERR, "command mismatch,"
-						    "expect %u, get %u",
-						    vf->pend_cmd, msg_opc);
+				if (msg_opc == vf->pend_cmd) {
+					rte_atomic32_dec(&vf->pend_cmd_count);
+					if (rte_atomic32_read(
+						&vf->pend_cmd_count) == 0)
+						_notify_cmd(vf, msg_ret);
+				} else {
+					PMD_DRV_LOG(ERR,
+					"command mismatch, expect %u, get %u",
+						vf->pend_cmd, msg_opc);
+				}
 				PMD_DRV_LOG(DEBUG,
-					    "adminq response is received,"
-					    " opcode = %d", msg_opc);
+				"adminq response is received, opcode = %d",
+						msg_opc);
 			}
 			break;
 		default:
@@ -365,7 +396,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_ENABLE_VLAN_STRIPPING");
@@ -386,7 +417,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_DISABLE_VLAN_STRIPPING");
@@ -415,7 +446,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
 		return err;
@@ -468,12 +499,13 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
-		VIRTCHNL_VF_OFFLOAD_QOS;
+		VIRTCHNL_VF_OFFLOAD_QOS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -518,7 +550,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
@@ -562,7 +594,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_strip);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
@@ -602,7 +634,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_insert);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
@@ -645,7 +677,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(vlan_filter);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");
@@ -666,7 +698,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
@@ -697,7 +729,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES");
@@ -725,7 +757,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES");
@@ -758,7 +790,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
@@ -800,7 +832,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
@@ -844,7 +876,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
@@ -890,7 +922,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
@@ -922,7 +954,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
@@ -954,7 +986,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
@@ -1046,7 +1078,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
@@ -1087,7 +1119,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
 
@@ -1128,7 +1160,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
 
@@ -1188,7 +1220,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		args.in_args_size = len;
 		args.out_buffer = vf->aq_resp;
 		args.out_size = IAVF_AQ_BUF_SZ;
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		if (err)
 			PMD_DRV_LOG(ERR, "fail to execute command %s",
 				    add ? "OP_ADD_ETHER_ADDRESS" :
@@ -1215,7 +1247,7 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		*pstats = NULL;
@@ -1250,7 +1282,7 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1290,7 +1322,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
@@ -1317,7 +1349,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
@@ -1344,7 +1376,7 @@ iavf_fdir_add(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
 		return err;
@@ -1404,7 +1436,7 @@ iavf_fdir_del(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
 		return err;
@@ -1451,7 +1483,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
 		return err;
@@ -1492,7 +1524,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of %s",
@@ -1515,7 +1547,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
@@ -1541,7 +1573,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_SET_RSS_HENA");
@@ -1562,7 +1594,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1595,7 +1627,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_TC_MAP");
@@ -1640,7 +1672,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 		i * sizeof(struct virtchnl_ether_addr);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
@@ -1686,11 +1718,11 @@ iavf_request_queues(struct rte_eth_dev *dev, uint16_t num)
 		 * before iavf_read_msg_from_pf.
 		 */
 		rte_intr_disable(&pci_dev->intr_handle);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_intr_enable(&pci_dev->intr_handle);
 	} else {
 		rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
 				  iavf_dev_alarm_handler, dev);
 	}
@@ -1729,7 +1761,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
 		return err;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread
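
Put differently, an inline-IPsec request introduced by this patch completes
in two messages: the immediate adminq acknowledgement and a later
asynchronous response. A condensed sketch of the counting scheme follows
(a simplification for illustration, not the driver code verbatim):

#include <rte_atomic.h>

struct example_vf_state {
	rte_atomic32_t pend_cmd_count;	/* 1 for sync cmds, 2 for async */
};

static void
example_on_response(struct example_vf_state *vf)
{
	/* called from the adminq handler for each matching response */
	rte_atomic32_dec(&vf->pend_cmd_count);
	if (rte_atomic32_read(&vf->pend_cmd_count) == 0) {
		/* equivalent of _notify_cmd(): clear the pending command
		 * and wake the thread polling in iavf_execute_vf_cmd()
		 */
	}
}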

* [dpdk-dev] [PATCH v8 4/7] net/iavf: add iAVF IPsec inline crypto support
  2021-10-15 10:15 ` [dpdk-dev] [PATCH v8 0/7] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (2 preceding siblings ...)
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
@ 2021-10-15 10:15   ` Radu Nicolau
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-15 10:15 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Ray Kinsella
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload of
ESP over UDP, in conjunction with TSO for UDP and TCP flows.
Implement support for rte_security packet metadata.

Add definitions for the IPsec descriptors and extend the offload
support in the data and context descriptors accordingly.

Add support to the virtual channel mailbox for IPsec Crypto request
operations. IPsec Crypto requests receive an initial acknowledgement
of receipt from the physical function driver and then an
asynchronous response with the success/failure of the request,
including any response data.

Add enhanced descriptor debugging

Refactor the scalar TX burst function to support integration of the offloads.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h                       |   10 +
 drivers/net/iavf/iavf_ethdev.c                |   41 +-
 drivers/net/iavf/iavf_generic_flow.c          |   15 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1891 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  202 +-
 drivers/net/iavf/iavf_rxtx.h                  |   93 +-
 drivers/net/iavf/iavf_vchnl.c                 |   30 +
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 13 files changed, 2813 insertions(+), 21 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
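
Before the diff, a brief note on how an application consumes this offload
on the transmit side: once an inline crypto security session exists for
the port, the fast path only has to attach the session metadata and set
the security offload flag on the mbuf. The sketch below is an assumed
application-side example; session creation via the port's security
context is omitted and the names are illustrative.

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_ethdev.h>
#include <rte_security.h>

/* 'sec_ctx' and 'sess' are assumed to have been obtained earlier via
 * rte_eth_dev_get_sec_ctx() and rte_security_session_create();
 * error handling is omitted for brevity.
 */
static uint16_t
example_tx_inline_ipsec(uint16_t port_id, uint16_t queue_id,
			struct rte_security_ctx *sec_ctx,
			struct rte_security_session *sess,
			struct rte_mbuf *m)
{
	/* attach the session so the PMD can fill the IPsec descriptor */
	rte_security_set_pkt_metadata(sec_ctx, sess, m, NULL);

	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;

	return rte_eth_tx_burst(port_id, queue_id, &m, 1);
}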

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 67051f29a8..e98c42ba08 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -221,6 +221,7 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+	struct iavf_parser_list ipsec_crypto_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
@@ -245,6 +246,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_IPV6_FLOW,
 	IAVF_PROTO_XTR_TCP,
 	IAVF_PROTO_XTR_IP_OFFSET,
+	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
 	IAVF_PROTO_XTR_MAX,
 };
 
@@ -256,11 +258,14 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 };
 
+struct iavf_security_ctx;
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
 	struct rte_eth_dev_data *dev_data;
 	struct iavf_info vf;
+	struct iavf_security_ctx *security_ctx;
 
 	bool rx_bulk_alloc_allowed;
 	/* For vector PMD */
@@ -279,6 +284,8 @@ struct iavf_adapter {
 	(&((struct iavf_adapter *)adapter)->vf)
 #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct iavf_adapter *)adapter)->hw)
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+	(((struct iavf_adapter *)adapter)->security_ctx)
 
 /* IAVF_VSI_TO */
 #define IAVF_VSI_TO_HW(vsi) \
@@ -421,5 +428,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			uint16_t size);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 7e4d256122..6663e923db 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -30,6 +30,7 @@
 #include "iavf_rxtx.h"
 #include "iavf_generic_flow.h"
 #include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
@@ -71,6 +72,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
 	[IAVF_PROTO_XTR_IP_OFFSET] = {
 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
 		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+		.param = {
+		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+		.ol_flag =
+			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
 };
 
 static int iavf_dev_configure(struct rte_eth_dev *dev);
@@ -938,6 +944,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	/* free iAVF security device context and all related resources */
+	iavf_security_ctx_destroy(adapter);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -947,7 +956,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
@@ -990,6 +1001,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
 
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+	}
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -1748,6 +1764,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
 		{ "tcp",       IAVF_PROTO_XTR_TCP       },
 		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
 	};
 	uint32_t i;
 
@@ -1756,8 +1773,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 			return xtr_type_map[i].type;
 	}
 
-	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
-		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+			"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
 
 	return -1;
 }
@@ -2406,6 +2423,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		goto flow_init_err;
 	}
 
+	/** Check if the IPsec Crypto offload is supported and create
+	 *  security_ctx if it is.
+	 */
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		/* Initialize security_ctx only for primary process */
+		ret = iavf_security_ctx_create(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+			return ret;
+		}
+
+		ret = iavf_security_init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
+			return ret;
+		}
+	}
+
 	iavf_default_rss_disable(adapter);
 
 	return 0;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index b86d99e57d..8dfa549980 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1635,6 +1635,7 @@ iavf_flow_init(struct iavf_adapter *ad)
 	TAILQ_INIT(&vf->flow_list);
 	TAILQ_INIT(&vf->rss_parser_list);
 	TAILQ_INIT(&vf->dist_parser_list);
+	TAILQ_INIT(&vf->ipsec_crypto_parser_list);
 	rte_spinlock_init(&vf->flow_ops_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
@@ -1709,6 +1710,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
+		list = &vf->ipsec_crypto_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
@@ -2018,6 +2022,13 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 
 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
 				    actions, error);
+	if (*engine)
+		return 0;
+
+	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
+			pattern, actions, error);
+	if (*engine)
+		return 0;
 
 	if (!*engine) {
 		rte_flow_error_set(error, EINVAL,
@@ -2064,6 +2075,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	/* Special case for inline crypto egress flows */
+	if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY)
+		goto free_flow;
+
 	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
 			&engine, iavf_parse_engine_create, error);
 	if (ret < 0) {
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 4794d1fb80..a471c0331f 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -449,6 +449,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 /* engine types. */
 enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_NONE = 0,
+	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
 	IAVF_FLOW_ENGINE_MAX,
@@ -462,6 +463,7 @@ enum iavf_flow_engine_type {
  */
 enum iavf_flow_classification_stage {
 	IAVF_FLOW_STAGE_NONE = 0,
+	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
 	IAVF_FLOW_STAGE_RSS,
 	IAVF_FLOW_STAGE_DISTRIBUTOR,
 	IAVF_FLOW_STAGE_MAX,
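
With the new IPSEC_CRYPTO flow engine type and parser list added above, an
application binds an inbound SA to its traffic with an ingress rule of the
form ETH / IPV4 (or IPV6) / ESP terminated by a SECURITY action carrying the
session. A minimal sketch of such a rule using the rte_flow API follows; the
address, SPI and helper name are illustrative only and not part of this patch:

  #include <rte_flow.h>
  #include <rte_security.h>
  #include <rte_ip.h>

  /* Illustrative only: bind an ingress ESP flow to an inline-crypto
   * session previously created with rte_security_session_create().
   */
  static struct rte_flow *
  add_inb_esp_rule(uint16_t port_id, struct rte_security_session *sess)
  {
          struct rte_flow_attr attr = { .ingress = 1 };
          struct rte_flow_item_ipv4 ipv4_spec = {
                  .hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1)),
          };
          struct rte_flow_item_esp esp_spec = {
                  .hdr.spi = rte_cpu_to_be_32(0x100),
          };
          struct rte_flow_item pattern[] = {
                  { .type = RTE_FLOW_ITEM_TYPE_ETH },
                  { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
                  { .type = RTE_FLOW_ITEM_TYPE_ESP, .spec = &esp_spec },
                  { .type = RTE_FLOW_ITEM_TYPE_END },
          };
          struct rte_flow_action actions[] = {
                  { .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = sess },
                  { .type = RTE_FLOW_ACTION_TYPE_END },
          };
          struct rte_flow_error err;

          return rte_flow_create(port_id, &attr, pattern, actions, &err);
  }
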
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
new file mode 100644
index 0000000000..70980ed995
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -0,0 +1,1891 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+#include <rte_security_driver.h>
+#include <rte_security.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_log.h"
+#include "iavf_generic_flow.h"
+
+#include "iavf_ipsec_crypto.h"
+#include "iavf_ipsec_crypto_capabilities.h"
+
+/**
+ * iAVF IPsec Crypto Security Context
+ */
+struct iavf_security_ctx {
+	struct iavf_adapter *adapter;
+	int pkt_md_offset;
+	struct rte_cryptodev_capabilities *crypto_capabilities;
+};
+
+/**
+ * iAVF IPsec Crypto Security Session Parameters
+ */
+struct iavf_security_session {
+	struct iavf_adapter *adapter;
+
+	enum rte_security_ipsec_sa_mode mode;
+	enum rte_security_ipsec_tunnel_type type;
+	enum rte_security_ipsec_sa_direction direction;
+
+	struct {
+		uint32_t spi; /* Security Parameter Index */
+		uint32_t hw_idx; /* SA Index in hardware table */
+	} sa;
+
+	struct {
+		uint8_t enabled :1;
+		union {
+			uint64_t value;
+			struct {
+				uint32_t hi;
+				uint32_t low;
+			};
+		};
+	} esn;
+
+	struct {
+		uint8_t enabled :1;
+	} udp_encap;
+
+	size_t iv_sz;
+	size_t icv_sz;
+	size_t block_sz;
+
+	struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
+};
+/**
+ *  IV Length field in IPsec Tx Desc uses the following encoding:
+ *
+ *  0B - 0
+ *  4B - 1
+ *  8B - 2
+ *  16B - 3
+ *
+ * but we also need the IV Length for TSO to correctly calculate the total
+ * header length, so it is placed in the upper 6 bits here for easier retrieval.
+ */
+static inline uint8_t
+calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
+{
+	uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
+
+	switch (iv_sz) {
+	case 4:
+		iv_length = IAVF_IPSEC_IV_LEN_DW;
+		break;
+	case 8:
+		iv_length = IAVF_IPSEC_IV_LEN_DDW;
+		break;
+	case 16:
+		iv_length = IAVF_IPSEC_IV_LEN_QDW;
+		break;
+	}
+
+	return (iv_sz << 2) | iv_length;
+}
+
+static unsigned int
+iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
+{
+	return sizeof(struct iavf_security_session);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_capability(struct iavf_security_ctx *iavf_sctx,
+	uint32_t algo, uint32_t type)
+{
+	const struct rte_cryptodev_capabilities *capability;
+	int i = 0;
+
+	capability = &iavf_sctx->crypto_capabilities[i];
+
+	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+			capability->sym.xform_type == type &&
+			capability->sym.cipher.algo == algo)
+			return &capability->sym;
+		/** try next capability */
+		capability = &iavf_crypto_capabilities[i++];
+	}
+
+	return NULL;
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_auth_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
+}
+static const struct rte_cryptodev_symmetric_capability *
+get_aead_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
+}
+
+static uint16_t
+get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->auth.block_size;
+}
+
+static uint8_t
+calc_context_desc_cipherblock_sz(size_t len)
+{
+	switch (len) {
+	case 8:
+		return 0x2;
+	case 16:
+		return 0x3;
+	default:
+		return 0x0;
+	}
+}
+
+static int
+valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
+{
+	if (len < min || len > max)
+		return false;
+
+	if (increment == 0)
+		return true;
+
+	if ((len - min) % increment)
+		return false;
+
+	/* make sure it fits in the key array */
+	if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
+		return false;
+
+	return true;
+}
+
+static int
+valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_auth_xform *auth)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, auth->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(auth->key.length,
+		capability->auth.key_size.min,
+		capability->auth.key_size.max,
+		capability->auth.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_cipher_xform *cipher)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, cipher->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(cipher->key.length,
+		capability->cipher.key_size.min,
+		capability->cipher.key_size.max,
+		capability->cipher.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_aead_xform *aead)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, aead->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(aead->key.length,
+		capability->aead.key_size.min,
+		capability->aead.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
+	struct rte_security_session_conf *conf)
+{
+	/** validate security action/protocol selection */
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
+		PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate IPsec protocol selection */
+	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate selected options */
+	if (conf->ipsec.options.copy_dscp ||
+		conf->ipsec.options.copy_flabel ||
+		conf->ipsec.options.copy_df ||
+		conf->ipsec.options.dec_ttl ||
+		conf->ipsec.options.ecn ||
+		conf->ipsec.options.stats) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+		return -EINVAL;
+	}
+
+	/**
+	 * Validate crypto xforms parameters.
+	 *
+	 * AEAD transforms can be used for either inbound/outbound IPsec SAs,
+	 * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
+	 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
+	 */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_auth_xform(iavf_sctx,
+				&conf->crypto_xform->next->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->next->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_aead_xform *aead, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AEAD;
+
+	switch (aead->algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		cfg->algo_type = VIRTCHNL_AES_CCM; break;
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		cfg->algo_type = VIRTCHNL_AES_GCM; break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
+		break;
+	}
+
+	cfg->key_len = aead->key.length;
+	cfg->iv_len = aead->iv.length;
+	cfg->digest_len = aead->digest_length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, aead->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_cipher_xform *cipher, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_CIPHER;
+
+	switch (cipher->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cfg->algo_type = VIRTCHNL_AES_CBC; break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cfg->algo_type = VIRTCHNL_3DES_CBC; break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cfg->algo_type = VIRTCHNL_AES_CTR;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid cipher parameters");
+		break;
+	}
+
+	cfg->key_len = cipher->key.length;
+	cfg->iv_len = cipher->iv.length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_auth_xform *auth, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AUTH;
+
+	switch (auth->algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+		cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		cfg->algo_type = VIRTCHNL_AES_CMAC; break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		cfg->algo_type = VIRTCHNL_AES_GMAC;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid auth parameters");
+		break;
+	}
+
+	cfg->key_len = auth->key.length;
+	cfg->iv_len = auth->iv.length;
+	cfg->digest_len = auth->digest_length;
+
+	memcpy(cfg->key_data, auth->key.data, cfg->key_len);
+}
+
+/**
+ * Send SA add virtual channel request to Inline IPsec driver.
+ *
+ * Inline IPsec driver expects SPI and destination IP address to be in host
+ * order, but the DPDK APIs use network order, therefore we need to do a htonl
+ * conversion of these parameters.
+ */
+static uint32_t
+iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
+	struct rte_security_session_conf *conf)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	struct virtchnl_ipsec_sa_cfg *sa_cfg;
+	size_t request_len, response_len;
+
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg);
+
+	request = rte_malloc("iavf-sad-add-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
+	response = rte_malloc("iavf-sad-add-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set SA configuration params */
+	sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
+
+	sa_cfg->spi = conf->ipsec.spi;
+	sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
+	sa_cfg->virtchnl_direction =
+		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+			VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
+
+	if (conf->ipsec.options.esn) {
+		sa_cfg->esn_enabled = 1;
+		sa_cfg->esn_hi = conf->ipsec.esn.hi;
+		sa_cfg->esn_low = conf->ipsec.esn.low;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sa_cfg->udp_encap_enabled = 1;
+
+	/* Set outer IP params */
+	if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
+
+		*((uint32_t *)sa_cfg->dst_addr)	=
+			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
+	} else {
+		uint32_t *v6_dst_addr =
+			conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
+
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
+
+		((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
+		((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
+		((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
+		((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
+	}
+
+	/* set crypto params */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->aead, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->cipher, conf->ipsec.salt);
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->auth, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->auth, conf->ipsec.salt);
+		if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
+			sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->cipher, conf->ipsec.salt);
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sa_cfg_resp->sa_handle;
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static void
+set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
+	struct iavf_security_session *sess)
+{
+	template->sa_idx = sess->sa.hw_idx;
+
+	if (sess->udp_encap.enabled)
+		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
+
+	if (sess->esn.enabled)
+		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
+
+	template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
+	template->ctx_desc_ipsec_params =
+			calc_context_desc_cipherblock_sz(sess->block_sz) |
+			((uint8_t)(sess->icv_sz >> 2) << 3);
+}
+
+static void
+set_session_parameter(struct iavf_security_ctx *iavf_sctx,
+	struct iavf_security_session *sess,
+	struct rte_security_session_conf *conf, uint32_t sa_idx)
+{
+	sess->adapter = iavf_sctx->adapter;
+
+	sess->mode = conf->ipsec.mode;
+	sess->direction = conf->ipsec.direction;
+
+	if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		sess->type = conf->ipsec.tunnel.type;
+
+	sess->sa.spi = conf->ipsec.spi;
+	sess->sa.hw_idx = sa_idx;
+
+	if (conf->ipsec.options.esn) {
+		sess->esn.enabled = 1;
+		sess->esn.value = conf->ipsec.esn.value;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sess->udp_encap.enabled = 1;
+
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sess->block_sz = get_aead_blocksize(iavf_sctx,
+			conf->crypto_xform->aead.algo);
+		sess->iv_sz = conf->crypto_xform->aead.iv.length;
+		sess->icv_sz = conf->crypto_xform->aead.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sess->block_sz = get_cipher_blocksize(iavf_sctx,
+			conf->crypto_xform->cipher.algo);
+		sess->iv_sz = conf->crypto_xform->cipher.iv.length;
+		sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			sess->block_sz = get_auth_blocksize(iavf_sctx,
+				RTE_CRYPTO_SYM_XFORM_AUTH);
+			sess->iv_sz = conf->crypto_xform->auth.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		} else {
+			sess->block_sz = get_cipher_blocksize(iavf_sctx,
+				conf->crypto_xform->next->cipher.algo);
+			sess->iv_sz =
+				conf->crypto_xform->next->cipher.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		}
+	}
+
+	set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
+}
+
+/**
+ * Create IPsec Security Association for inline IPsec Crypto offload.
+ *
+ * 1. validate session configuration parameters
+ * 2. allocate session memory from mempool
+ * 3. add SA to hardware database
+ * 4. set session parameters
+ * 5. create packet metadata template for datapath
+ */
+static int
+iavf_ipsec_crypto_session_create(void *device,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *session,
+				 struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_session = NULL;
+	int sa_idx;
+	int ret = 0;
+
+	/* validate that all SA parameters are valid for device */
+	ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
+	if (ret)
+		return ret;
+
+	/* allocate session context */
+	if (rte_mempool_get(mempool, (void **)&iavf_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
+		return -ENOMEM;
+	}
+
+	/* add SA to hardware database */
+	sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
+	if (sa_idx < 0) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add SA (spi: %d, mode: %s, direction: %s)",
+			conf->ipsec.spi,
+			conf->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
+				"transport" : "tunnel",
+			conf->ipsec.direction ==
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+				"inbound" : "outbound");
+
+		rte_mempool_put(mempool, iavf_session);
+		return -EFAULT;
+	}
+
+	/* save data plane required session parameters */
+	set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
+
+	/* save to security session private data */
+	set_sec_session_private_data(session, iavf_session);
+
+	return 0;
+}
+
+/**
+ * Check if valid ipsec crypto action.
+ * SPI must be non-zero and SPI in session must match SPI value
+ * passed into function.
+ *
+ * returns: 0 if invalid session or SPI value equals zero
+ * returns: 1 if valid
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_session *sess = session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(sess == NULL || sess->adapter != adapter))
+		return false;
+
+	/* SPI value must be non-zero */
+	if (spi == 0)
+		return false;
+	/* Session SPI must match flow SPI */
+	else if (sess->sa.spi == spi) {
+		return true;
+		/**
+		 * TODO: We should add a way of tracking valid hw SA indices to
+		 * make validation less brittle
+		 */
+	}
+
+		return true;
+}
+
+/**
+ * Send virtual channel security policy add request to IES driver.
+ *
+ * IES driver expects SPI and destination IP address to be in host
+ * order, but the DPDK APIs use network order, therefore we need to do a htonl
+ * conversion of these parameters.
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg);
+	request = rte_malloc("iavf-inbound-security-policy-add-request",
+				request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* ESP SPI */
+	request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
+
+	/* Destination IP  */
+	if (is_v4) {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
+		request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
+	} else {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+		request->ipsec_data.sp_cfg->dip[0] =
+				htonl(((uint32_t *)v6_dst_addr)[0]);
+		request->ipsec_data.sp_cfg->dip[1] =
+				htonl(((uint32_t *)v6_dst_addr)[1]);
+		request->ipsec_data.sp_cfg->dip[2] =
+				htonl(((uint32_t *)v6_dst_addr)[2]);
+		request->ipsec_data.sp_cfg->dip[3] =
+				htonl(((uint32_t *)v6_dst_addr)[3]);
+	}
+
+	request->ipsec_data.sp_cfg->drop = drop;
+
+	/** Traffic Class/Congestion Domain currently not supported */
+	request->ipsec_data.sp_cfg->set_tc = 0;
+	request->ipsec_data.sp_cfg->cgd = 0;
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
+	response = rte_malloc("iavf-inbound-security-policy-add-response",
+				response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* send virtual channel request to add security policy to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sp_cfg_resp->rule_id;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_update);
+	request = rte_malloc("iavf-sa-update-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sa-update-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set request params */
+	request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
+	request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
+
+	/* send virtual channel request to update SA in hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_resp->resp;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_update(void *device,
+		struct rte_security_session *session,
+		struct rte_security_session_conf *conf)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int rc = 0;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* update esn hi 32-bits */
+	if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
+		/**
+		 * Update ESN in hardware for inbound SA. Store in
+		 * iavf_security_session for outbound SA for use
+		 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
+		 */
+		if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+			rc = iavf_ipsec_crypto_sa_update_esn(adapter,
+					iavf_sess);
+		else
+			iavf_sess->esn.hi = conf->ipsec.esn.hi;
+	}
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
+		struct rte_security_session *session __rte_unused,
+		struct rte_security_stats *stats __rte_unused)
+{
+	return -EOPNOTSUPP;
+}
+
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_destroy);
+	request = rte_malloc("iavf-sp-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sp-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set security policy params */
+	request->ipsec_data.sp_destroy->table_id = is_v4 ?
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+	request->ipsec_data.sp_destroy->rule_id = flow_id;
+
+	/* send virtual channel request to delete security policy from hardware */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		return response->ipsec_data.ipsec_status->status;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_destroy);
+
+	request = rte_malloc("iavf-sa-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+
+	response = rte_malloc("iavf-sa-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/**
+	 * SA delete supports deletion of 1-8 specified SAs or, if the flag
+	 * field is zero, all SAs associated with the VF will be deleted.
+	 */
+	if (sess) {
+		request->ipsec_data.sa_destroy->flag = 0x1;
+		request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
+	} else {
+		request->ipsec_data.sa_destroy->flag = 0x0;
+	}
+
+	/* send virtual channel request to delete SA from hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+
+	/**
+	 * Delete status will be the same bitmask as the sa_destroy request flag
+	 * if the delete is successful.
+	 */
+	if (request->ipsec_data.sa_destroy->flag !=
+			response->ipsec_data.ipsec_status->status)
+		rc = -EFAULT;
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_destroy(void *device,
+		struct rte_security_session *session)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int ret;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
+	rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
+	return ret;
+}
+
+/**
+ * Get ESP trailer from packet as well as calculate the total ESP trailer
+ * length, which include padding, ESP trailer footer and the ICV
+ */
+static inline struct rte_esp_tail *
+iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t *esp_trailer_length)
+{
+	struct rte_esp_tail *esp_trailer;
+
+	uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
+	uint16_t offset = 0;
+
+	/**
+	 * The ICV will not be present in TSO packets as this is appended by
+	 * hardware during segment generation
+	 */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		length -=  s->icv_sz;
+
+	*esp_trailer_length = length;
+
+	/**
+	 * Calculate offset in packet to ESP trailer header, this should be
+	 * total packet length less the size of the ESP trailer plus the ICV
+	 * length if it is present
+	 */
+	offset = rte_pktmbuf_pkt_len(m) - length;
+
+	if (m->nb_segs > 1) {
+		/* find segment in which esp trailer is located */
+		while (m->data_len < offset) {
+			offset -= m->data_len;
+			m = m->next;
+		}
+	}
+
+	esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
+
+	*esp_trailer_length += esp_trailer->pad_len;
+
+	return esp_trailer;
+}
+
+static inline uint16_t
+iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t esp_tlen)
+{
+	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
+	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
+	uint16_t ol4_len = 0;		/* UDP NATT */
+	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
+	uint16_t l4_len = 0;		/* TCP/UDP/SCTP hdrs */
+	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
+
+	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ol3_len = m->outer_l3_len;
+		/**<
+		 * application provided l3len assumed to include length of
+		 * ipv4/6 hdr + ext hdrs
+		 */
+
+	if (s->udp_encap.enabled)
+		ol4_len = sizeof(struct rte_udp_hdr);
+
+	l3_len = m->l3_len;
+	l4_len = m->l4_len;
+
+	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
+			esp_hlen + l3_len + l4_len + esp_tlen);
+}
+
+static int
+iavf_ipsec_crypto_pkt_metadata_set(void *device,
+			 struct rte_security_session *session,
+			 struct rte_mbuf *m, void *params)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_sess = session->sess_private_data;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
+	struct rte_esp_tail *esp_tail;
+	uint64_t *sqn = params;
+	uint16_t esp_trailer_length;
+
+	/* Check that we have a valid session associated with this device */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* Get dynamic metadata location from mbuf */
+	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
+		struct iavf_ipsec_crypto_pkt_metadata *);
+
+	/* Set immutable metadata values from session template */
+	memcpy(md, &iavf_sess->pkt_metadata_template,
+		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
+
+	esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
+			&esp_trailer_length);
+
+	/* Set per packet mutable metadata values */
+	md->esp_trailer_len = esp_trailer_length;
+	md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
+				iavf_sess, esp_trailer_length);
+	md->next_proto = esp_tail->next_proto;
+
+	/* If Extended SN in use set the upper 32-bits in metadata */
+	if (iavf_sess->esn.enabled && sqn != NULL)
+		md->esn = (uint32_t)(*sqn >> 32);
+
+	return 0;
+}
+
+static int
+iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
+		struct virtchnl_ipsec_cap *capability)
+{
+	/* Perform pf-vf comms */
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg);
+
+	request = rte_malloc("iavf-device-capability-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_cap);
+	response = rte_malloc("iavf-device-capability-response",
+			response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* send virtual channel request to get device capabilities */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id) {
+		rc = -EFAULT;
+		goto update_cleanup;
+	}
+	memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+enum rte_crypto_auth_algorithm auth_maptbl[] = {
+	/* Hash Algorithm */
+	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
+	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
+	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
+	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
+	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
+	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
+	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
+	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
+	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
+	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+};
+
+static void
+update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
+		struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	capability->auth.algo = auth_maptbl[acap->algo_type];
+	capability->auth.block_size = acap->block_size;
+
+	capability->auth.key_size.min = acap->min_key_size;
+	capability->auth.key_size.max = acap->max_key_size;
+	capability->auth.key_size.increment = acap->inc_key_size;
+
+	capability->auth.digest_size.min = acap->min_digest_size;
+	capability->auth.digest_size.max = acap->max_digest_size;
+	capability->auth.digest_size.increment = acap->inc_digest_size;
+}
+
+enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
+	/* Cipher Algorithm */
+	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
+	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
+	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
+	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
+};
+
+static void
+update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+	capability->cipher.algo = cipher_maptbl[acap->algo_type];
+
+	capability->cipher.block_size = acap->block_size;
+
+	capability->cipher.key_size.min = acap->min_key_size;
+	capability->cipher.key_size.max = acap->max_key_size;
+	capability->cipher.key_size.increment = acap->inc_key_size;
+
+	capability->cipher.iv_size.min = acap->min_iv_size;
+	capability->cipher.iv_size.max = acap->max_iv_size;
+	capability->cipher.iv_size.increment = acap->inc_iv_size;
+}
+
+enum rte_crypto_aead_algorithm aead_maptbl[] = {
+	/* AEAD Algorithm */
+	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
+	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
+	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+};
+
+static void
+update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	capability->aead.algo = aead_maptbl[acap->algo_type];
+
+	capability->aead.block_size = acap->block_size;
+
+	capability->aead.key_size.min = acap->min_key_size;
+	capability->aead.key_size.max = acap->max_key_size;
+	capability->aead.key_size.increment = acap->inc_key_size;
+
+	capability->aead.aad_size.min = acap->min_aad_size;
+	capability->aead.aad_size.max = acap->max_aad_size;
+	capability->aead.aad_size.increment = acap->inc_aad_size;
+
+	capability->aead.iv_size.min = acap->min_iv_size;
+	capability->aead.iv_size.max = acap->max_iv_size;
+	capability->aead.iv_size.increment = acap->inc_iv_size;
+
+	capability->aead.digest_size.min = acap->min_digest_size;
+	capability->aead.digest_size.max = acap->max_digest_size;
+	capability->aead.digest_size.increment = acap->inc_digest_size;
+}
+
+/**
+ * Dynamically set crypto capabilities based on virtchnl IPsec
+ * capabilities structure.
+ */
+int
+iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
+{
+	struct rte_cryptodev_capabilities *capabilities;
+	int i, j, number_of_capabilities = 0, ci = 0;
+
+	/* Count the total number of crypto algorithms supported */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
+		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
+
+	/**
+	 * Allocate cryptodev capabilities structure for
+	 * *number_of_capabilities* items plus one item to null terminate the
+	 * array
+	 */
+	capabilities = rte_zmalloc("crypto_cap",
+		sizeof(struct rte_cryptodev_capabilities) *
+		(number_of_capabilities + 1), 0);
+	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
+
+	/**
+	 * Iterate over each virtchnl crypto capability by crypto type and
+	 * algorithm.
+	 */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
+		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
+			switch (vch_cap->cap[i].crypto_type) {
+			case VIRTCHNL_AUTH:
+				update_auth_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_CIPHER:
+				update_cipher_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_AEAD:
+				update_aead_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			default:
+				capabilities[ci].op =
+						RTE_CRYPTO_OP_TYPE_UNDEFINED;
+				break;
+			}
+		}
+	}
+
+	iavf_sctx->crypto_capabilities = capabilities;
+	return 0;
+}
+
+/**
+ * Get security capabilities for device
+ */
+static const struct rte_security_capability *
+iavf_ipsec_crypto_capabilities_get(void *device)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	unsigned int i;
+
+	static struct rte_security_capability iavf_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 }
+			},
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	/**
+	 * Update the security capabilities struct with the runtime discovered
+	 * crypto capabilities, except for last element of the array which is
+	 * the null termination
+	 */
+	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
+			sizeof(iavf_security_capabilities[0])) - 1); i++) {
+		iavf_security_capabilities[i].crypto_capabilities =
+			iavf_sctx->crypto_capabilities;
+	}
+
+	return iavf_security_capabilities;
+}
+
+static struct rte_security_ops iavf_ipsec_crypto_ops = {
+	.session_get_size		= iavf_ipsec_crypto_session_size_get,
+	.session_create			= iavf_ipsec_crypto_session_create,
+	.session_update			= iavf_ipsec_crypto_session_update,
+	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
+	.session_destroy		= iavf_ipsec_crypto_session_destroy,
+	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
+	.get_userdata			= NULL,
+	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
+};
+
+int
+iavf_security_ctx_create(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx;
+
+	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
+	if (sctx == NULL)
+		return -ENOMEM;
+
+	sctx->device = adapter->vf.eth_dev;
+	sctx->ops = &iavf_ipsec_crypto_ops;
+	sctx->sess_cnt = 0;
+
+	adapter->vf.eth_dev->security_ctx = sctx;
+
+	if (adapter->security_ctx == NULL) {
+		adapter->security_ctx = rte_malloc("iavf_security_ctx",
+				sizeof(struct iavf_security_ctx), 0);
+		if (adapter->security_ctx == NULL)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int
+iavf_security_init(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct rte_mbuf_dynfield pkt_md_dynfield = {
+		.name = "iavf_ipsec_crypto_pkt_metadata",
+		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
+		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
+	};
+	struct virtchnl_ipsec_cap capabilities;
+	int rc;
+
+	iavf_sctx->adapter = adapter;
+
+	iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
+	if (iavf_sctx->pkt_md_offset < 0)
+		return iavf_sctx->pkt_md_offset;
+
+	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
+	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
+	if (rc)
+		return rc;
+
+	return	iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
+			&capabilities);
+}
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	return iavf_sctx->pkt_md_offset;
+}
+
+int
+iavf_security_ctx_destroy(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx  = adapter->vf.eth_dev->security_ctx;
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	if (iavf_sctx == NULL)
+		return -ENODEV;
+
+	/* TODO: Add resources cleanup */
+
+	/* free and reset security data structures */
+	rte_free(iavf_sctx);
+	rte_free(sctx);
+
+	iavf_sctx = NULL;
+	sctx = NULL;
+
+	return 0;
+}
+
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
+{
+	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
+
+	/** Capability check for IPsec Crypto */
+	if (resources && (resources->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
+		return true;
+
+	return false;
+}
+
+#define IAVF_IPSEC_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
+enum iavf_ipsec_flow_pt_type {
+	IAVF_PATTERN_ESP = 1,
+	IAVF_PATTERN_AH,
+	IAVF_PATTERN_UDP_ESP,
+};
+enum iavf_ipsec_flow_pt_ip_ver {
+	IAVF_PATTERN_IPV4 = 1,
+	IAVF_PATTERN_IPV6,
+};
+
+#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
+#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
+#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
+
+static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
+	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
+};
+
+struct iavf_ipsec_flow_item {
+	uint64_t id;
+	uint8_t is_ipv4;
+	uint32_t spi;
+	struct rte_ether_hdr eth_hdr;
+	union {
+		struct rte_ipv4_hdr ipv4_hdr;
+		struct rte_ipv6_hdr ipv6_hdr;
+	};
+	struct rte_udp_hdr udp_hdr;
+};
+
+static void
+parse_eth_item(const struct rte_flow_item_eth *item,
+		struct rte_ether_hdr *eth)
+{
+	memcpy(eth->src_addr.addr_bytes,
+			item->src.addr_bytes, sizeof(eth->src_addr));
+	memcpy(eth->dst_addr.addr_bytes,
+			item->dst.addr_bytes, sizeof(eth->dst_addr));
+}
+
+static void
+parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
+		struct rte_ipv4_hdr *ipv4)
+{
+	ipv4->src_addr = item->hdr.src_addr;
+	ipv4->dst_addr = item->hdr.dst_addr;
+}
+
+static void
+parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
+		struct rte_ipv6_hdr *ipv6)
+{
+	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
+	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+}
+
+static void
+parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
+{
+	udp->dst_port = item->hdr.dst_port;
+	udp->src_port = item->hdr.src_port;
+}
+
+static int
+has_security_action(const struct rte_flow_action actions[],
+	const void **session)
+{
+	/* only {SECURITY; END} supported */
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
+		*session = actions[0].conf;
+		return true;
+	}
+	return false;
+}
+
+static struct iavf_ipsec_flow_item *
+iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t type)
+{
+	const void *session;
+	struct iavf_ipsec_flow_item
+		*ipsec_flow = rte_malloc("security-flow-rule",
+		sizeof(struct iavf_ipsec_flow_item), 0);
+	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
+	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
+
+	if (ipsec_flow == NULL)
+		return NULL;
+
+	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
+
+	if (pattern[0].spec)
+		parse_eth_item((const struct rte_flow_item_eth *)
+				pattern[0].spec, &ipsec_flow->eth_hdr);
+
+	switch (p_type) {
+	case IAVF_PATTERN_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[2].spec)->hdr.spi;
+		break;
+	case IAVF_PATTERN_AH:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_ah *)
+					pattern[2].spec)->spi;
+		break;
+	case IAVF_PATTERN_UDP_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		parse_udp_item((const struct rte_flow_item_udp *)
+				pattern[2].spec,
+			&ipsec_flow->udp_hdr);
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[3].spec)->hdr.spi;
+		break;
+	default:
+		goto flow_cleanup;
+	}
+
+	if (!has_security_action(actions, &session))
+		goto flow_cleanup;
+
+	if (!iavf_ipsec_crypto_action_valid(ethdev, session,
+			ipsec_flow->spi))
+		goto flow_cleanup;
+
+	return ipsec_flow;
+
+flow_cleanup:
+	rte_free(ipsec_flow);
+	return NULL;
+}
+
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser;
+
+static int
+iavf_ipsec_flow_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
+		parser = &iavf_ipsec_flow_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
+}
+
+static int
+iavf_ipsec_flow_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = meta;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	if (ipsec_flow->is_ipv4) {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			1,
+			ipsec_flow->ipv4_hdr.dst_addr,
+			NULL,
+			0);
+	} else {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			0,
+			0,
+			ipsec_flow->ipv6_hdr.dst_addr,
+			0);
+	}
+
+	if (ipsec_flow->id < 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Failed to add SA.");
+		return -rte_errno;
+	}
+
+	flow->rule = ipsec_flow;
+
+	return 0;
+}
+
+static int
+iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	iavf_ipsec_crypto_security_policy_delete(ad,
+			ipsec_flow->is_ipv4, ipsec_flow->id);
+	rte_free(ipsec_flow);
+	return 0;
+}
+
+static struct iavf_flow_engine iavf_ipsec_flow_engine = {
+	.init = iavf_ipsec_flow_init,
+	.uninit = iavf_ipsec_flow_uninit,
+	.create = iavf_ipsec_flow_create,
+	.destroy = iavf_ipsec_flow_destroy,
+	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
+};
+
+static int
+iavf_ipsec_flow_parse(struct iavf_adapter *ad,
+		       struct iavf_pattern_match_item *array,
+		       uint32_t array_len,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta,
+		       struct rte_flow_error *error)
+{
+	struct iavf_pattern_match_item *item = NULL;
+	int ret = -1;
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (item && item->meta) {
+		uint32_t type = (uint64_t)(item->meta);
+		struct iavf_ipsec_flow_item *fi =
+				iavf_ipsec_flow_item_parse(ad->vf.eth_dev,
+						pattern, actions, type);
+		if (fi && meta) {
+			*meta = fi;
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser = {
+	.engine = &iavf_ipsec_flow_engine,
+	.array = iavf_ipsec_flow_pattern,
+	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
+	.parse_pattern_action = iavf_ipsec_flow_parse,
+	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
+};
+
+RTE_INIT(iavf_ipsec_flow_engine_register)
+{
+	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
+}
+
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
new file mode 100644
index 0000000000..4e4c8798ec
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_H_
+#define _IAVF_IPSEC_CRYPTO_H_
+
+#include <rte_security.h>
+
+#include "iavf.h"
+
+
+
+struct iavf_tx_ipsec_desc {
+	union {
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le16 l4payload_length;
+			__le32 esn;
+			__le16 trailer_length;
+			u8 type:4;
+			u8 rsv:1;
+			u8 udp:1;
+			u8 ivlen:2;
+			u8 next_header;
+			__le16 ipv6_ext_hdr_length;
+			__le32 said;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
+			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
+
+/* Initialization Vector Length type */
+enum iavf_ipsec_iv_len {
+	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
+	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
+	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
+	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
+};
+
+
+/* IPsec Crypto Packet Metadata offload flags */
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
+
+/**
+ * Packet metadata data structure used to hold parameters required by the iAVF
+ * transmit data path. Parameters set for session by calling
+ * rte_security_set_pkt_metadata() API.
+ */
+struct iavf_ipsec_crypto_pkt_metadata {
+	uint32_t sa_idx;                /* SA hardware index (20b/4B) */
+
+	uint8_t ol_flags;		/* flags (1B) */
+	uint8_t len_iv;			/* IV length (2b/1B) */
+	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
+	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
+
+	uint16_t l4_payload_len;	/* L4 payload length */
+	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extension headers len (5b/1B) */
+	uint8_t next_proto;		/* Next Protocol (8b/1B) */
+
+	uint32_t esn;		        /* Extended Sequence Number (32b/4B) */
+} __rte_packed;
+
+/**
+ * Check whether inline IPsec Crypto offload is supported
+ */
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
+
+/**
+ * Create security context
+ */
+int iavf_security_ctx_create(struct iavf_adapter *adapter);
+
+/**
+ * Initialize security context
+ */
+int iavf_security_init(struct iavf_adapter *adapter);
+
+/**
+ * Set security capabilities
+ */
+int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchl_capabilities);
+
+
+int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+/**
+ * Destroy security context
+ */
+int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
+
+/**
+ * Verify that the inline IPsec Crypto action is valid for this device
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi);
+
+/**
+ * Add inbound security policy rule to hardware
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop);
+
+/**
+ * Delete inbound security policy rule from hardware
+ */
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id);
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+#endif /* _IAVF_IPSEC_CRYPTO_H_ */
diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
new file mode 100644
index 0000000000..70ce8dd638
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC MAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 },
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* ChaCha20-Poly1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 16,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	}
+};
+
+
+#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 8a73c929dc..611e53ccfc 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -27,6 +27,7 @@
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
+#include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
@@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 uint8_t
 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
@@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
 		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
 		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
 		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
+				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
 	};
 
 	return flex_type < RTE_DIM(rxdid_map) ?
@@ -508,6 +512,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields =
 			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
+	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+		rxq->xtr_ol_flag =
+			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+		rxq->rxd_to_pkt_fields =
+			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
 	case IAVF_RXDID_COMMS_OVS_1:
 		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
 		break;
@@ -692,6 +702,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		       const struct rte_eth_txconf *tx_conf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
@@ -736,9 +748,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		struct virtchnl_vlan_supported_caps *insertion_support =
-			&vf->vlan_v2_caps.offloads.insertion_support;
+			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
 		uint32_t insertion_cap;
 
 		if (insertion_support->outer)
@@ -762,6 +774,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
+	if (iavf_ipsec_crypto_supported(adapter))
+		txq->ipsec_crypto_pkt_md_offset =
+			iavf_security_get_pkt_md_offset(adapter);
+
 	/* Allocate software ring */
 	txq->sw_ring =
 		rte_zmalloc_socket("iavf tx sw ring",
@@ -1081,6 +1097,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
 #endif
 }
 
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
+
+	mb->dynfield1[0] = desc->ipsec_said &
+			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
+}
+
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp,
+			  struct iavf_ipsec_crypto_stats *stats)
+{
+	uint16_t status1 = rte_le_to_cpu_16(rxdp->wb.status_error1);
+
+	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
+		uint16_t ipsec_status;
+
+		mb->ol_flags |= PKT_RX_SEC_OFFLOAD;
+
+		ipsec_status = status1 &
+			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
+
+
+		if (unlikely(ipsec_status !=
+			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
+			mb->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+
+			switch (ipsec_status) {
+			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
+				stats->ierrors.sad_miss++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
+				stats->ierrors.not_processed++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
+				stats->ierrors.icv_check++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
+				stats->ierrors.ipsec_length++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
+				stats->ierrors.misc++;
+				break;
+			}
+
+			stats->ierrors.count++;
+			return;
+		}
+
+		stats->icount++;
+		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
+
+		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
+			ipsec_status !=
+				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
+			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
+	}
+}
+
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
@@ -1399,6 +1479,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1541,6 +1623,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1779,6 +1863,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+				&rxq->stats.ipsec_crypto);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2091,6 +2177,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
 	*field |= cmd;
 }
 
+static inline void
+iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+{
+	uint64_t ipsec_field =
+		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+
+	*field |= ipsec_field;
+}
+
+
 static inline void
 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 		const struct rte_mbuf *m)
@@ -2123,15 +2221,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 
 static inline uint16_t
 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
-	struct rte_mbuf *m)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
 	uint64_t segmentation_field = 0;
 	uint64_t total_length = 0;
 
-	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD) {
+		total_length = ipsec_md->l4_payload_len;
+	} else {
+		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
-	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
-		total_length -= m->outer_l3_len;
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			total_length -= m->outer_l3_len;
+	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
 	if (!m->l4_len || !m->tso_segsz)
@@ -2160,7 +2262,8 @@ struct iavf_tx_context_desc_qws {
 
 static inline void
 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
-	struct rte_mbuf *m, uint16_t *tlen)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
+	uint16_t *tlen)
 {
 	volatile struct iavf_tx_context_desc_qws *desc_qws =
 			(volatile struct iavf_tx_context_desc_qws *)desc;
@@ -2172,8 +2275,13 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 
 	/* fill segmentation field */
 	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		/* fill IPsec field */
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
+				ipsec_md);
+
 		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
-				m);
+				m, ipsec_md);
 	}
 
 	/* fill tunnelling field */
@@ -2187,6 +2295,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 }
 
 
+static inline void
+iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
+	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
+{
+	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
+		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
+		((uint64_t)md->esp_trailer_len <<
+				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
+
+	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
+		((uint64_t)md->next_proto <<
+				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
+		((uint64_t)(md->len_iv & 0x3) <<
+				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
+		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+				1ULL : 0ULL) <<
+				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
+		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+
+	/**
+	 * TODO: Pre-calculate this in the Session initialization
+	 *
+	 * Calculate IPsec length required in data descriptor func when TSO
+	 * offload is enabled
+	 */
+	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
+			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+			sizeof(struct rte_udp_hdr) : 0);
+}
+
 static inline void
 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		struct rte_mbuf *m)
@@ -2298,6 +2438,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
 }
 
 
+static struct iavf_ipsec_crypto_pkt_metadata *
+iavf_ipsec_crypto_get_pkt_metdata(const struct iavf_tx_queue *txq,
+		struct rte_mbuf *m)
+{
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
+				struct iavf_ipsec_crypto_pkt_metadata *);
+
+	return NULL;
+}
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -2326,7 +2477,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	for (idx = 0; idx < nb_pkts; idx++) {
 		volatile struct iavf_tx_desc *ddesc;
-		uint16_t nb_desc_ctx;
+		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
+
+		uint16_t nb_desc_ctx, nb_desc_ipsec;
 		uint16_t nb_desc_data, nb_desc_required;
 		uint16_t tlen = 0, ipseclen = 0;
 		uint64_t ddesc_template = 0;
@@ -2336,16 +2489,23 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+		/**
+		 * Get metadata for ipsec crypto from mbuf dynamic fields if
+		 * security offload is specified.
+		 */
+		ipsec_md = iavf_ipsec_crypto_get_pkt_metdata(txq, mb);
+
 		nb_desc_data = mb->nb_segs;
 		nb_desc_ctx = !!(mb->ol_flags &
 			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
+		nb_desc_ipsec = !!(mb->ol_flags & PKT_TX_SEC_OFFLOAD);
 
 		/**
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx;
+		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2396,7 +2556,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txe->mbuf = NULL;
 			}
 
-			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
 			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
 			txe->last_id = desc_idx_last;
@@ -2404,7 +2564,27 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe = txn;
 			}
 
+		if (nb_desc_ipsec) {
+			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
+				(volatile struct iavf_tx_ipsec_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
+			if (txe->mbuf) {
+				rte_pktmbuf_free_seg(txe->mbuf);
+				txe->mbuf = NULL;
+			}
+
+			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
+		}
 
 		mb_seg = mb;
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index d8a62e2667..5b40392c79 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -25,7 +25,8 @@
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		DEV_TX_OFFLOAD_TCP_TSO |		 \
+		DEV_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
 		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
@@ -47,7 +48,7 @@
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          88
+#define IAVF_MIN_TSO_MSS          256
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
@@ -65,7 +66,8 @@
 		PKT_TX_VLAN_PKT |		 \
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
-		PKT_TX_TCP_SEG)
+		PKT_TX_TCP_SEG |		 \
+		PKT_TX_SEC_OFFLOAD)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
@@ -163,6 +165,24 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_rx_queue_stats {
+	uint64_t reserved;
+	struct iavf_ipsec_crypto_stats ipsec_crypto;
+};
+
 /* Structure associated with each Rx queue. */
 struct iavf_rx_queue {
 	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
@@ -211,6 +231,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 				/* handle flexible descriptor by RXDID */
+	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
 };
 
@@ -245,6 +266,7 @@ struct iavf_tx_queue {
 	uint64_t offloads;
 	uint16_t next_dd;              /* next to set RS, for VPMD */
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
+	uint16_t ipsec_crypto_pkt_md_offset;
 
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
@@ -347,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
 	} flex_ts;
 };
 
+/* Rx Flex Descriptor
+ * RxDID Profile ID 24 Inline IPsec
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: Inline IPsec SAID lower 16-bits
+ * Flex-field 5: Inline IPsec SAID upper 16-bits
+ */
+struct iavf_32b_rx_flex_desc_comms_ipsec {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	__le32 ipsec_said;
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -366,6 +422,7 @@ enum iavf_rxdid {
 	IAVF_RXDID_COMMS_AUX_TCP	= 21,
 	IAVF_RXDID_COMMS_OVS_1		= 22,
 	IAVF_RXDID_COMMS_OVS_2		= 23,
+	IAVF_RXDID_COMMS_IPSEC_CRYPTO	= 24,
 	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
 	IAVF_RXDID_LAST			= 63,
 };
@@ -393,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
 
 enum iavf_rx_flex_desc_status_error_1_bits {
 	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
 	/* [10:6] reserved */
 	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
 	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
@@ -405,6 +466,23 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
+
+enum iavf_rx_flex_desc_ipsec_crypto_status {
+	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
+	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
+	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
+	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
+	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
+	/* Reserved */
+	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
+};
+
+
 
 #define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
 #define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
@@ -672,6 +750,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	case IAVF_TX_DESC_DTYPE_CONTEXT:
 		name = "Tx_context_desc";
 		break;
+	case IAVF_TX_DESC_DTYPE_IPSEC:
+		name = "Tx_IPsec_desc";
+		break;
 	default:
 		name = "unknown_desc";
 		break;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index da4654957a..3be001ff5d 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1774,3 +1774,33 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 
 	return 0;
 }
+
+
+
+int
+iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
+	args.in_args = msg;
+	args.in_args_size = msg_len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 1);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+				"OP_INLINE_IPSEC_CRYPTO");
+		return err;
+	}
+
+	memcpy(resp_msg, args.out_buffer, resp_msg_len);
+
+	return 0;
+}
+
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 36a82e3faa..5eb230f687 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -5,7 +5,7 @@
 cflags += ['-Wno-strict-aliasing']
 
 includes += include_directories('../../common/iavf')
-deps += ['common_iavf']
+deps += ['common_iavf', 'security', 'cryptodev']
 
 sources = files(
         'iavf_ethdev.c',
@@ -15,6 +15,7 @@ sources = files(
         'iavf_fdir.c',
         'iavf_hash.c',
         'iavf_tm.c',
+        'iavf_ipsec_crypto.c',
 )
 
 if arch_subdir == 'x86'
diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
index 3a045040f1..7426eb9be3 100644
--- a/drivers/net/iavf/rte_pmd_iavf.h
+++ b/drivers/net/iavf/rte_pmd_iavf.h
@@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 /**
  * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map
index f3efe756cf..97f0f87311 100644
--- a/drivers/net/iavf/version.map
+++ b/drivers/net/iavf/version.map
@@ -13,4 +13,7 @@ EXPERIMENTAL {
 	rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+	# added in 21.11
+	rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v8 5/7] net/iavf: add xstats support for inline IPsec crypto
  2021-10-15 10:15 ` [dpdk-dev] [PATCH v8 0/7] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (3 preceding siblings ...)
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-15 10:15   ` Radu Nicolau
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-15 10:15 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add per-queue counters for maintaining inline IPsec crypto offload
statistics, which can be retrieved through
rte_security_session_stats_get(), with more detailed error counters
exposed through the rte_ethdev xstats API.
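
For illustration, an application can read the new counters with the
generic ethdev xstats API; the sketch below (function name is
illustrative) filters on the xstat name prefix added in this patch:

#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Sketch only: print the inline IPsec crypto xstats of a port */
static void
dump_inline_ipsec_xstats(uint16_t port_id)
{
	int i, nb = rte_eth_xstats_get_names(port_id, NULL, 0);

	if (nb <= 0)
		return;

	struct rte_eth_xstat_name names[nb];
	struct rte_eth_xstat vals[nb];

	if (rte_eth_xstats_get_names(port_id, names, nb) != nb ||
			rte_eth_xstats_get(port_id, vals, nb) != nb)
		return;

	for (i = 0; i < nb; i++)
		if (!strncmp(names[i].name, "inline_ipsec_crypto_", 20))
			printf("%s: %" PRIu64 "\n",
				names[i].name, vals[i].value);
}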

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h        | 21 ++++++++-
 drivers/net/iavf/iavf_ethdev.c | 84 ++++++++++++++++++++++++++++------
 drivers/net/iavf/iavf_rxtx.h   | 12 -----
 3 files changed, 89 insertions(+), 28 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index e98c42ba08..90a7344bd5 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -96,6 +96,25 @@ struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
 
+
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_eth_xstats {
+	struct virtchnl_eth_stats eth_stats;
+	struct iavf_ipsec_crypto_stats ips_stats;
+};
+
 /* Structure that defines a VSI, associated with a adapter. */
 struct iavf_vsi {
 	struct iavf_adapter *adapter; /* Backreference to associated adapter */
@@ -105,7 +124,7 @@ struct iavf_vsi {
 	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
 	uint16_t base_vector;
 	uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
-	struct virtchnl_eth_stats eth_stats_offset;
+	struct iavf_eth_xstats eth_stats_offset;
 };
 
 struct rte_flow;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 6663e923db..8f35107f3a 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -90,6 +90,7 @@ static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);
 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n);
 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -145,21 +146,37 @@ struct rte_iavf_xstats_name_off {
 	unsigned int offset;
 };
 
+#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
-	{"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
-	{"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
-	{"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
-	{"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
-	{"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
+	{"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
+	{"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
+	{"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
+	{"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
+	{"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
 	{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
 		rx_unknown_protocol)},
-	{"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
-	{"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
-	{"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
-	{"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
-	{"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
-	{"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
+	{"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
+	{"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
+	{"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
+	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
+	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
+	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+
+	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
+	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
+	{"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
+	{"inline_ipsec_crypto_ierrors_sad_lookup",
+			_OFF_OF(ips_stats.ierrors.sad_miss)},
+	{"inline_ipsec_crypto_ierrors_not_processed",
+			_OFF_OF(ips_stats.ierrors.not_processed)},
+	{"inline_ipsec_crypto_ierrors_icv_fail",
+			_OFF_OF(ips_stats.ierrors.icv_check)},
+	{"inline_ipsec_crypto_ierrors_length",
+			_OFF_OF(ips_stats.ierrors.ipsec_length)},
+	{"inline_ipsec_crypto_ierrors_misc",
+			_OFF_OF(ips_stats.ierrors.misc)},
 };
+#undef _OFF_OF
 
 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
 		sizeof(rte_iavf_stats_strings[0]))
@@ -177,7 +194,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.stats_reset                = iavf_dev_stats_reset,
 	.xstats_get                 = iavf_dev_xstats_get,
 	.xstats_get_names           = iavf_dev_xstats_get_names,
-	.xstats_reset               = iavf_dev_stats_reset,
+	.xstats_reset               = iavf_dev_xstats_reset,
 	.promiscuous_enable         = iavf_dev_promiscuous_enable,
 	.promiscuous_disable        = iavf_dev_promiscuous_disable,
 	.allmulticast_enable        = iavf_dev_allmulticast_enable,
@@ -1559,7 +1576,7 @@ iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
 static void
 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
 {
-	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
 
 	iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
 	iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
@@ -1621,7 +1638,18 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev)
 		return ret;
 
 	/* set stats offset base on current values */
-	vsi->eth_stats_offset = *pstats;
+	vsi->eth_stats_offset.eth_stats = *pstats;
+
+	return 0;
+}
+
+static int
+iavf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_dev_stats_reset(dev);
+	memset(&vf->vsi.eth_stats_offset, 0, sizeof(struct iavf_eth_xstats));
 
 	return 0;
 }
@@ -1641,6 +1669,27 @@ static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return IAVF_NB_XSTATS;
 }
 
+static void
+iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
+		struct iavf_ipsec_crypto_stats *ips)
+{
+	uint16_t idx;
+	for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
+		struct iavf_rx_queue *rxq;
+		struct iavf_ipsec_crypto_stats *stats;
+		rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
+		stats = &rxq->stats.ipsec_crypto;
+		ips->icount += stats->icount;
+		ips->ibytes += stats->ibytes;
+		ips->ierrors.count += stats->ierrors.count;
+		ips->ierrors.sad_miss += stats->ierrors.sad_miss;
+		ips->ierrors.not_processed += stats->ierrors.not_processed;
+		ips->ierrors.icv_check += stats->ierrors.icv_check;
+		ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
+		ips->ierrors.misc += stats->ierrors.misc;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1651,6 +1700,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_vsi *vsi = &vf->vsi;
 	struct virtchnl_eth_stats *pstats = NULL;
+	struct iavf_eth_xstats iavf_xtats = {0};
 
 	if (n < IAVF_NB_XSTATS)
 		return IAVF_NB_XSTATS;
@@ -1663,11 +1713,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 		return 0;
 
 	iavf_update_stats(vsi, pstats);
+	iavf_xtats.eth_stats = *pstats;
+
+	if (iavf_ipsec_crypto_supported(adapter))
+		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
-		xstats[i].value = *(uint64_t *)(((char *)pstats) +
+		xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
 			rte_iavf_stats_strings[i].offset);
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 5b40392c79..e8fecbd7bc 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -165,18 +165,6 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
-struct iavf_ipsec_crypto_stats {
-	uint64_t icount;
-	uint64_t ibytes;
-	struct {
-		uint64_t count;
-		uint64_t sad_miss;
-		uint64_t not_processed;
-		uint64_t icv_check;
-		uint64_t ipsec_length;
-		uint64_t misc;
-	} ierrors;
-};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v8 6/7] net/iavf: add watchdog for VFLR
  2021-10-15 10:15 ` [dpdk-dev] [PATCH v8 0/7] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (4 preceding siblings ...)
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-10-15 10:15   ` Radu Nicolau
  2021-10-18  5:34     ` Wu, Jingjing
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
  6 siblings, 1 reply; 128+ messages in thread
From: Radu Nicolau @ 2021-10-15 10:15 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add a watchdog to the iAVF PMD that monitors the VFLR register. If the
device is not already in reset and a VF reset in progress is detected,
notify the user through a callback and set the device into the reset
state. If the device is already in reset, poll for completion of the
reset.

The watchdog is disabled by default; to enable it, set
IAVF_DEV_WATCHDOG_PERIOD to a non-zero value (in microseconds).
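
When a VFLR is detected, the watchdog raises the standard
RTE_ETH_EVENT_INTR_RESET event, so an application can react with a
regular ethdev event callback; a minimal sketch with illustrative
function names:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* Sketch only: notify the application when the VF enters reset */
static int
vf_reset_event_cb(uint16_t port_id, enum rte_eth_event_type type,
		void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (type == RTE_ETH_EVENT_INTR_RESET)
		printf("port %u: VF reset detected by watchdog\n", port_id);

	return 0;
}

/* Called once during application init, after the port is probed */
static void
register_vf_reset_cb(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
			vf_reset_event_cb, NULL);
}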

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/net/iavf/iavf.h        |  5 ++
 drivers/net/iavf/iavf_ethdev.c | 94 ++++++++++++++++++++++++++++++++++
 2 files changed, 99 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 90a7344bd5..f06979b4da 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -31,6 +31,8 @@
 
 #define IAVF_NUM_MACADDR_MAX      64
 
+#define IAVF_DEV_WATCHDOG_PERIOD     0
+
 #define IAVF_DEFAULT_RX_PTHRESH      8
 #define IAVF_DEFAULT_RX_HTHRESH      8
 #define IAVF_DEFAULT_RX_WTHRESH      0
@@ -216,6 +218,9 @@ struct iavf_info {
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
+	/** iAVF watchdog enable */
+	bool watchdog_enabled;
+
 	/* Event from pf */
 	bool dev_closed;
 	bool link_up;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 8f35107f3a..9df9aeae7f 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -25,6 +25,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_alarm.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
@@ -240,6 +241,91 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+__rte_unused
+static int
+iavf_vfr_inprogress(struct iavf_hw *hw)
+{
+	int inprogress = 0;
+
+	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
+		VIRTCHNL_VFR_INPROGRESS)
+		inprogress = 1;
+
+	if (inprogress)
+		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
+
+	return inprogress;
+}
+
+__rte_unused
+static void
+iavf_dev_watchdog(void *cb_arg)
+{
+	struct iavf_adapter *adapter = cb_arg;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	int vfr_inprogress = 0, rc = 0;
+
+	/* check if watchdog has been disabled since last call */
+	if (!adapter->vf.watchdog_enabled)
+		return;
+
+	/* If in reset then poll vfr_inprogress register for completion */
+	if (adapter->vf.vf_reset) {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (!vfr_inprogress) {
+			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+				adapter->vf.eth_dev->data->name);
+			adapter->vf.vf_reset = false;
+		}
+	/* If not in reset then poll vfr_inprogress register for VFLR event */
+	} else {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (vfr_inprogress) {
+			PMD_DRV_LOG(INFO,
+				"VF \"%s\" reset event detected by watchdog",
+				adapter->vf.eth_dev->data->name);
+
+			/* enter reset state with VFLR event */
+			adapter->vf.vf_reset = true;
+
+			rte_eth_dev_callback_process(adapter->vf.eth_dev,
+				RTE_ETH_EVENT_INTR_RESET, NULL);
+		}
+	}
+
+	/* re-alarm watchdog */
+	rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, cb_arg);
+
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed to reset the device watchdog alarm for VF \"%s\"",
+			adapter->vf.eth_dev->data->name);
+}
+
+static void
+iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Enabling device watchdog");
+	adapter->vf.watchdog_enabled = true;
+	if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, (void *)adapter))
+		PMD_DRV_LOG(ERR, "Failed to enable device watchdog");
+#endif
+}
+
+static void
+iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Disabling device watchdog");
+	adapter->vf.watchdog_enabled = false;
+#endif
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
@@ -2497,6 +2583,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	iavf_default_rss_disable(adapter);
 
+
+	/* Start device watchdog */
+	iavf_dev_watchdog_enable(adapter);
+
+
 	return 0;
 
 flow_init_err:
@@ -2580,6 +2671,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
 		vf->vf_reset = false;
 
+	/* disable watchdog */
+	iavf_dev_watchdog_disable(adapter);
+
 	return ret;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v8 7/7] net/iavf: update doc with inline crypto support
  2021-10-15 10:15 ` [dpdk-dev] [PATCH v8 0/7] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (5 preceding siblings ...)
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
@ 2021-10-15 10:15   ` Radu Nicolau
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-15 10:15 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Haiyue Wang
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Update the PMD doc, feature matrix and release notes with the
new inline crypto feature.

Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 doc/guides/nics/features/iavf.ini      |  2 ++
 doc/guides/nics/intel_vf.rst           | 10 ++++++++++
 doc/guides/rel_notes/release_21_11.rst |  1 +
 3 files changed, 13 insertions(+)

diff --git a/doc/guides/nics/features/iavf.ini b/doc/guides/nics/features/iavf.ini
index d00ca934c3..78f649c25f 100644
--- a/doc/guides/nics/features/iavf.ini
+++ b/doc/guides/nics/features/iavf.ini
@@ -28,6 +28,7 @@ L4 checksum offload  = P
 Packet type parsing  = Y
 Rx descriptor status = Y
 Tx descriptor status = Y
+Inline crypto        = Y
 Basic stats          = Y
 Multiprocess aware   = Y
 FreeBSD              = Y
@@ -64,3 +65,4 @@ mark                 = Y
 passthru             = Y
 queue                = Y
 rss                  = Y
+security             = Y
diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index 2efdd1a41b..038e7c02b6 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -633,3 +633,13 @@ Windows Support
 
 *   To load NetUIO driver, follow the steps mentioned in `dpdk-kmods repository
     <https://git.dpdk.org/dpdk-kmods/tree/windows/netuio/README.rst>`_.
+
+
+Inline IPsec Support
+--------------------
+
+*   IAVF PMD supports inline crypto processing depending on the underlying
+    hardware crypto capabilities. IPsec Security Gateway Sample Application
+    supports inline IPsec processing for IAVF PMD. For more details see the
+    IPsec Security Gateway Sample Application and Security library
+    documentation.
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 7bb8768b67..3703d11369 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -95,6 +95,7 @@ New Features
 
   * Added Intel iavf support on Windows.
   * Added IPv4 and L4 (TCP/UDP/SCTP) checksum hash support in RSS flow.
+  * Added Intel iavf inline crypto support.
 
 * **Updated Intel ice driver.**
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v8 6/7] net/iavf: add watchdog for VFLR
  2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
@ 2021-10-18  5:34     ` Wu, Jingjing
  0 siblings, 0 replies; 128+ messages in thread
From: Wu, Jingjing @ 2021-10-18  5:34 UTC (permalink / raw)
  To: Nicolau, Radu, Xing, Beilei
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Zhang, Qi Z, Richardson,
	Bruce, Ananyev, Konstantin



> -----Original Message-----
> From: Nicolau, Radu <radu.nicolau@intel.com>
> Sent: Friday, October 15, 2021 6:15 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Doherty, Declan <declan.doherty@intel.com>; Sinha, Abhijit
> <abhijit.sinha@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; Ananyev, Konstantin <konstantin.ananyev@intel.com>;
> Nicolau, Radu <radu.nicolau@intel.com>
> Subject: [PATCH v8 6/7] net/iavf: add watchdog for VFLR
> 
> Add a watchdog to the iAVF PMD that monitors the VFLR register. If the
> device is not already in reset and a VF reset in progress is detected,
> notify the user through a callback and set the device into the reset
> state. If the device is already in reset, poll for completion of the
> reset.
> 
> The watchdog is disabled by default; to enable it, set
> IAVF_DEV_WATCHDOG_PERIOD to a non-zero value (in microseconds).
> 
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>

Acked-by: Jingjing Wu <jingjing.wu@intel.com>

^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v9 0/7] iavf: add iAVF IPsec inline crypto support
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (10 preceding siblings ...)
  2021-10-15 10:15 ` [dpdk-dev] [PATCH v8 0/7] iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-18 10:10 ` Radu Nicolau
  2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 1/7] common/iavf: " Radu Nicolau
                     ` (6 more replies)
  2021-10-19  9:23 ` [dpdk-dev] [PATCH v10 0/7] iavf: add iAVF IPsec " Radu Nicolau
                   ` (4 subsequent siblings)
  16 siblings, 7 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-18 10:10 UTC (permalink / raw)
  Cc: dev, declan.doherty, abhijit.sinha, jingjing.wu, qi.z.zhang,
	beilei.xing, bruce.richardson, konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload for
ESP over UDP, and in conjunction with TSO for UDP and TCP flows.

Depends on series "new features for ipsec and security libraries"
https://patchwork.dpdk.org/project/dpdk/list/?series=19593


Radu Nicolau (4):
  common/iavf: add iAVF IPsec inline crypto support
  net/iavf: add iAVF IPsec inline crypto support
  net/iavf: Add xstats support for inline IPsec crypto
  net/iavf: add watchdog for VFLR


 doc/guides/nics/features/iavf.ini             |    2 +
 doc/guides/nics/intel_vf.rst                  |   10 +
 doc/guides/rel_notes/release_21_11.rst        |    1 +
 drivers/common/iavf/iavf_type.h               |    1 +
 drivers/common/iavf/virtchnl.h                |   17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
 drivers/net/iavf/iavf.h                       |   52 +-
 drivers/net/iavf/iavf_ethdev.c                |  219 +-
 drivers/net/iavf/iavf_generic_flow.c          |   15 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1895 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  710 ++++--
 drivers/net/iavf/iavf_rxtx.h                  |  198 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  168 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 20 files changed, 4092 insertions(+), 311 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

-- 
v2: small updates and fixes in the flow related section
v3: split the huge patch and address feedback
v4: small changes due to dependencies changes
v5: updated the watchdog patch
v6: rebased and updated the common section
v7: fixed TSO issue and disabled watchdog by default
v8: rebased to next-net-intel and added doc updates
v9: fixed IV len for AEAD and GMAC

2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread
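
For reference, the offload added by this series is consumed through the generic
rte_security API: the application creates an inline-crypto session on the port
and tags outgoing mbufs with it. The sketch below shows only the transmit side,
assumes the security context sec_ctx and session sess were created beforehand
with RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, and is illustrative rather than
part of the series.

/* Minimal sketch (not from this series): transmit one packet through an
 * inline-crypto IPsec session using the generic rte_security API.
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_security.h>

static uint16_t
send_with_inline_ipsec(uint16_t port_id, uint16_t queue_id,
		       struct rte_security_ctx *sec_ctx,
		       struct rte_security_session *sess,
		       struct rte_mbuf *m)
{
	/* attach the SA to the packet and request inline processing */
	rte_security_set_pkt_metadata(sec_ctx, sess, m, NULL);
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;

	return rte_eth_tx_burst(port_id, queue_id, &m, 1);
}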

* [dpdk-dev] [PATCH v9 1/7] common/iavf: add iAVF IPsec inline crypto support
  2021-10-18 10:10 ` [dpdk-dev] [PATCH v9 0/7] iavf: add iAVF IPsec " Radu Nicolau
@ 2021-10-18 10:10   ` Radu Nicolau
  2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 2/7] net/iavf: rework tx path Radu Nicolau
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-18 10:10 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/common/iavf/iavf_type.h             |   1 +
 drivers/common/iavf/virtchnl.h              |  17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h | 553 ++++++++++++++++++++
 3 files changed, 569 insertions(+), 2 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 73dfb47e70..51267ca3b3 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -723,6 +723,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 067f715945..269578f7c0 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -38,6 +38,8 @@
  * value in current and future projects
  */
 
+#include "virtchnl_inline_ipsec.h"
+
 /* Error Codes */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
@@ -133,7 +135,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, and 37 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
 	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
 	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
@@ -225,6 +228,8 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 		return "VIRTCHNL_OP_DCF_CMD_DESC";
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
@@ -385,7 +390,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
-	/* BIT(8) is reserved */
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
@@ -2291,6 +2296,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..1e9134501e
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all fields are valid; if a certain field is invalid, set all of its bits to 1 */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* vf record the capability of crypto from the virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * The VF passes virtchnl_ipsec_cap to the PF
+ * and the PF returns its IPsec capabilities over virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * The VF sends this SA configuration to the PF using virtchnl;
+ * the PF creates the SA as configured and the PF driver returns
+ * a unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logically separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * The VF sends the SA index and configuration to the PF;
+ * the PF updates the SA according to the configuration.
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * The VF sends the SA index configuration to the PF;
+ * the PF destroys the SA accordingly. The flag bitmap
+ * indicates whether all SAs or just the selected SAs
+ * will be destroyed.
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * The VF sends this SA configuration to the PF using virtchnl;
+ * the PF reads the SA and returns the configuration of the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logically separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread
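
The header added in this patch is consumed by the net/iavf patches later in the
series when security session parameters are translated into virtchnl messages.
As a rough illustration of how the structures fit together, the sketch below
builds an INLINE_IPSEC_OP_SA_CREATE message; it assumes the u16 typedef and the
definitions from virtchnl_inline_ipsec.h are in scope, and it leaves out how
the buffer is actually sent to the PF.

/* Illustrative only: build an INLINE_IPSEC_OP_SA_CREATE request using the
 * structures from virtchnl_inline_ipsec.h above. The admin-queue send is
 * omitted; how the message is transported is up to the driver.
 */
#include <stdlib.h>
#include <string.h>

static struct inline_ipsec_msg *
build_sa_create_msg(const struct virtchnl_ipsec_sa_cfg *sa_cfg)
{
	/* total length as computed by the validation helper in the header */
	u16 len = virtchnl_inline_ipsec_val_msg_len(INLINE_IPSEC_OP_SA_CREATE);
	struct inline_ipsec_msg *msg = calloc(1, len);

	if (msg == NULL)
		return NULL;

	msg->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
	msg->req_id = 0; /* chosen by the caller to match the response */
	memcpy(msg->ipsec_data.sa_cfg, sa_cfg, sizeof(*sa_cfg));

	return msg;
}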

* [dpdk-dev] [PATCH v9 2/7] net/iavf: rework tx path
  2021-10-18 10:10 ` [dpdk-dev] [PATCH v9 0/7] iavf: add iAVF IPsec " Radu Nicolau
  2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 1/7] common/iavf: " Radu Nicolau
@ 2021-10-18 10:10   ` Radu Nicolau
  2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-18 10:10 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Rework the TX path and TX descriptor usage in order to
allow for better use of offload flags and to facilitate enabling of
the inline crypto offload feature.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c         | 538 ++++++++++++++++-----------
 drivers/net/iavf/iavf_rxtx.h         | 117 +++++-
 drivers/net/iavf/iavf_rxtx_vec_sse.c |  10 +-
 3 files changed, 431 insertions(+), 234 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 88bbd40c10..11b7fea36f 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1054,27 +1054,31 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 
 static inline void
 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
-			  volatile union iavf_rx_flex_desc *rxdp,
-			  uint8_t rx_flags)
+			  volatile union iavf_rx_flex_desc *rxdp)
 {
-	uint16_t vlan_tci = 0;
-
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
-	    rte_le_to_cpu_64(rxdp->wb.status_error0) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+		(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+		mb->vlan_tci =
+			rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	} else {
+		mb->vlan_tci = 0;
+	}
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
-	    rte_le_to_cpu_16(rxdp->wb.status_error1) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
-#endif
-
-	if (vlan_tci) {
-		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		mb->vlan_tci = vlan_tci;
+	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
+		mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+				PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+		mb->vlan_tci_outer = mb->vlan_tci;
+		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
+	} else {
+		mb->vlan_tci_outer = 0;
 	}
+#endif
 }
 
 /* Translate the rx descriptor status and error fields to pkt flags */
@@ -1394,7 +1398,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->ol_flags = 0;
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1536,7 +1540,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = 0;
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1774,7 +1778,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
-			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
+			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2068,190 +2072,302 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 	return 0;
 }
 
-/* Check if the context descriptor is needed for TX offloading */
+
+
+static inline void
+iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
+{
+	uint64_t cmd = 0;
+
+	/* TSO enabled */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;
+
+	/* Time Sync - Currently not supported */
+
+	/* Outer L2 TAG 2 Insertion - Currently not supported */
+	/* Inner L2 TAG 2 Insertion - Currently not supported */
+
+	*field |= cmd;
+}
+
+static inline void
+iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
+		const struct rte_mbuf *m)
+{
+	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
+	uint64_t eip_len = 0;
+	uint64_t eip_noinc = 0;
+	/* Default - IP_ID is increment in each segment of LSO */
+
+	switch (m->ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6 |
+			PKT_TX_OUTER_IP_CKSUM)) {
+	case PKT_TX_OUTER_IPV4:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV6:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	}
+
+	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
+		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
+		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
+}
+
 static inline uint16_t
-iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
+iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
+	struct rte_mbuf *m)
 {
-	if (flags & PKT_TX_TCP_SEG)
-		return 1;
-	if (flags & PKT_TX_VLAN_PKT &&
-	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		return 1;
-	return 0;
+	uint64_t segmentation_field = 0;
+	uint64_t total_length = 0;
+
+	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		total_length -= m->outer_l3_len;
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+	if (!m->l4_len || !m->tso_segsz)
+		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
+			 m->l4_len, m->tso_segsz);
+	if (m->tso_segsz < 88)
+		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
+			m->tso_segsz, 88);
+#endif
+	segmentation_field =
+		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
+				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
+		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
+				IAVF_TXD_CTX_QW1_MSS_MASK);
+
+	*field |= segmentation_field;
+
+	return total_length;
 }
 
+
+struct iavf_tx_context_desc_qws {
+	__le64 qw0;
+	__le64 qw1;
+};
+
 static inline void
-iavf_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union iavf_tx_offload tx_offload)
+iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
+	struct rte_mbuf *m, uint16_t *tlen)
 {
+	volatile struct iavf_tx_context_desc_qws *desc_qws =
+			(volatile struct iavf_tx_context_desc_qws *)desc;
+	/* fill descriptor type field */
+	desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
+
+	/* fill command field */
+	iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m);
+
+	/* fill segmentation field */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
+				m);
+	}
+
+	/* fill tunnelling field */
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
+	else
+		desc_qws->qw0 = 0;
+
+	desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
+	desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);
+}
+
+
+static inline void
+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
+		struct rte_mbuf *m)
+{
+	uint64_t command = 0;
+	uint64_t offset = 0;
+	uint64_t l2tag1 = 0;
+
+	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
+
+	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
+
+	/* Descriptor based VLAN insertion */
+	if (m->ol_flags & PKT_TX_VLAN_PKT) {
+		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
+		l2tag1 |= m->vlan_tci;
+	}
+
 	/* Set MACLEN */
-	*td_offset |= (tx_offload.l2_len >> 1) <<
-		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & PKT_TX_IP_CKSUM) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV4) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV6) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	}
-
-	if (ol_flags & PKT_TX_TCP_SEG) {
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
+	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L3 checksum offloading inner */
+	if (m->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV4) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV6) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	}
+
+	if (m->ol_flags & PKT_TX_TCP_SEG) {
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (m->l4_len >> 2) <<
 			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		return;
 	}
 
 	/* Enable L4 checksum offloads */
-	switch (ol_flags & PKT_TX_L4_MASK) {
+	switch (m->ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_TCP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_SCTP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_UDP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	}
+
+	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
+		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
+		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
+		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
+		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
-/* set TSO context descriptor
- * support IP -> L4 and IP -> IP -> L4
- */
-static inline uint64_t
-iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
+static inline void
+iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t value)
 {
-	uint64_t ctx_desc = 0;
-	uint32_t cd_cmd, hdr_len, cd_tso_len;
-
-	if (!tx_offload.l4_len) {
-		PMD_TX_LOG(DEBUG, "L4 length set to 0");
-		return ctx_desc;
+	*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 	}
 
-	hdr_len = tx_offload.l2_len +
-		  tx_offload.l3_len +
-		  tx_offload.l4_len;
+static inline void
+iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
+	struct rte_mbuf *m, uint64_t desc_template,
+	uint16_t tlen, uint16_t ipseclen)
+{
+	uint32_t hdrlen = m->l2_len;
+	uint32_t bufsz = 0;
 
-	cd_cmd = IAVF_TX_CTX_DESC_TSO;
-	cd_tso_len = mbuf->pkt_len - hdr_len;
-	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
-		     ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-		     ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+	/* fill data descriptor qw1 from template */
+	desc->cmd_type_offset_bsz = desc_template;
 
-	return ctx_desc;
-}
+	/* set data buffer address */
+	desc->buffer_addr = rte_mbuf_data_iova(m);
 
-/* Construct the tx flags */
-static inline uint64_t
-iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
-	       uint32_t td_tag)
-{
-	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
-				((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
-				((uint64_t)td_offset <<
-				 IAVF_TXD_QW1_OFFSET_SHIFT) |
-				((uint64_t)size  <<
-				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
-				((uint64_t)td_tag  <<
-				 IAVF_TXD_QW1_L2TAG1_SHIFT));
+	/* calculate data buffer size less set header lengths */
+	if ((m->ol_flags & PKT_TX_TUNNEL_MASK) &&
+			(m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))) {
+		hdrlen += m->outer_l3_len;
+		if (m->ol_flags & PKT_TX_L4_MASK)
+			hdrlen += m->l3_len + m->l4_len;
+		else
+			hdrlen += m->l3_len;
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			hdrlen += ipseclen;
+		bufsz = hdrlen + tlen;
+	} else {
+		bufsz = m->data_len;
+	}
+
+	/* set data buffer size */
+	desc->cmd_type_offset_bsz |=
+		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
+
+	desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
+	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
 }
 
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	volatile struct iavf_tx_desc *txd;
-	volatile struct iavf_tx_desc *txr;
-	struct iavf_tx_queue *txq;
-	struct iavf_tx_entry *sw_ring;
+	struct iavf_tx_queue *txq = tx_queue;
+	volatile struct iavf_tx_desc *txr = txq->tx_ring;
+	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
-	struct rte_mbuf *tx_pkt;
-	struct rte_mbuf *m_seg;
-	uint16_t tx_id;
-	uint16_t nb_tx;
-	uint32_t td_cmd;
-	uint32_t td_offset;
-	uint32_t td_tag;
-	uint64_t ol_flags;
-	uint16_t nb_used;
-	uint16_t nb_ctx;
-	uint16_t tx_last;
-	uint16_t slen;
-	uint64_t buf_dma_addr;
-	uint16_t cd_l2tag2 = 0;
-	union iavf_tx_offload tx_offload = {0};
-
-	txq = tx_queue;
-	sw_ring = txq->sw_ring;
-	txr = txq->tx_ring;
-	tx_id = txq->tx_tail;
-	txe = &sw_ring[tx_id];
+	struct rte_mbuf *mb, *mb_seg;
+	uint16_t desc_idx, desc_idx_last;
+	uint16_t idx;
+
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_free < txq->free_thresh)
-		(void)iavf_xmit_cleanup(txq);
+		iavf_xmit_cleanup(txq);
+
+	desc_idx = txq->tx_tail;
+	txe = &txe_ring[desc_idx];
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+		iavf_dump_tx_entry_ring(txq);
+		iavf_dump_tx_desc_ring(txq);
+#endif
+
 
-	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-		td_cmd = 0;
-		td_tag = 0;
-		td_offset = 0;
+	for (idx = 0; idx < nb_pkts; idx++) {
+		volatile struct iavf_tx_desc *ddesc;
+		uint16_t nb_desc_ctx;
+		uint16_t nb_desc_data, nb_desc_required;
+		uint16_t tlen = 0, ipseclen = 0;
+		uint64_t ddesc_template = 0;
+		uint64_t ddesc_cmd = 0;
+
+		mb = tx_pkts[idx];
 
-		tx_pkt = *tx_pkts++;
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
-		ol_flags = tx_pkt->ol_flags;
-		tx_offload.l2_len = tx_pkt->l2_len;
-		tx_offload.l3_len = tx_pkt->l3_len;
-		tx_offload.l4_len = tx_pkt->l4_len;
-		tx_offload.tso_segsz = tx_pkt->tso_segsz;
-		/* Calculate the number of context descriptors needed. */
-		nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
+		nb_desc_data = mb->nb_segs;
+		nb_desc_ctx = !!(mb->ol_flags &
+			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
 
-		/* The number of descriptors that must be allocated for
+		/**
+		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
-		 * packet plus 1 context descriptor if needed.
+		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
-		tx_last = (uint16_t)(tx_id + nb_used - 1);
+		nb_desc_required = nb_desc_data + nb_desc_ctx;
+
+		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
-		/* Circular ring */
-		if (tx_last >= txq->nb_tx_desc)
-			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+		/* wrap descriptor ring */
+		if (desc_idx_last >= txq->nb_tx_desc)
+			desc_idx_last =
+				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
 
-		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
-			   " tx_first=%u tx_last=%u",
-			   txq->port_id, txq->queue_id, tx_id, tx_last);
+		PMD_TX_LOG(DEBUG,
+			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
+			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
 
-		if (nb_used > txq->nb_free) {
+		if (nb_desc_required > txq->nb_free) {
 			if (iavf_xmit_cleanup(txq)) {
-				if (nb_tx == 0)
+				if (idx == 0)
 					return 0;
 				goto end_of_tx;
 			}
-			if (unlikely(nb_used > txq->rs_thresh)) {
-				while (nb_used > txq->nb_free) {
+			if (unlikely(nb_desc_required > txq->rs_thresh)) {
+				while (nb_desc_required > txq->nb_free) {
 					if (iavf_xmit_cleanup(txq)) {
-						if (nb_tx == 0)
+						if (idx == 0)
 							return 0;
 						goto end_of_tx;
 					}
@@ -2259,122 +2375,94 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 		}
 
-		/* Descriptor based VLAN insertion */
-		if (ol_flags & PKT_TX_VLAN_PKT &&
-		    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
-			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
-			td_tag = tx_pkt->vlan_tci;
-		}
-
-		/* According to datasheet, the bit2 is reserved and must be
-		 * set to 1.
-		 */
-		td_cmd |= 0x04;
-
-		/* Enable checksum offloading */
-		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
-			iavf_txd_enable_checksum(ol_flags, &td_cmd,
-						&td_offset, tx_offload);
+		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
 
-		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
-			uint64_t cd_type_cmd_tso_mss =
-				IAVF_TX_DESC_DTYPE_CONTEXT;
-			volatile struct iavf_tx_context_desc *ctx_txd =
+		if (nb_desc_ctx) {
+			volatile struct iavf_tx_context_desc *ctx_desc =
 				(volatile struct iavf_tx_context_desc *)
-							&txr[tx_id];
+					&txr[desc_idx];
 
 			/* clear QW0 or the previous writeback value
 			 * may impact next write
 			 */
-			*(volatile uint64_t *)ctx_txd = 0;
+			*(volatile uint64_t *)ctx_desc = 0;
 
-			txn = &sw_ring[txe->next_id];
+			txn = &txe_ring[txe->next_id];
 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
 			if (txe->mbuf) {
 				rte_pktmbuf_free_seg(txe->mbuf);
 				txe->mbuf = NULL;
 			}
 
-			/* TSO enabled */
-			if (ol_flags & PKT_TX_TCP_SEG)
-				cd_type_cmd_tso_mss |=
-					iavf_set_tso_ctx(tx_pkt, tx_offload);
+			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
-			if (ol_flags & PKT_TX_VLAN_PKT &&
-			   txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
-				cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
-					<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-				cd_l2tag2 = tx_pkt->vlan_tci;
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
 			}
 
-			ctx_txd->type_cmd_tso_mss =
-				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
-			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
 
-			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
-		}
 
-		m_seg = tx_pkt;
+		mb_seg = mb;
+
 		do {
-			txd = &txr[tx_id];
-			txn = &sw_ring[txe->next_id];
+			ddesc = (volatile struct iavf_tx_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
 			if (txe->mbuf)
 				rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = m_seg;
-
-			/* Setup TX Descriptor */
-			slen = m_seg->data_len;
-			buf_dma_addr = rte_mbuf_data_iova(m_seg);
-			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
-			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
-								  td_offset,
-								  slen,
-								  td_tag);
-
-			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
+
+			txe->mbuf = mb_seg;
+			iavf_fill_data_desc(ddesc, mb_seg,
+					ddesc_template, tlen, ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->next;
-		} while (m_seg);
+			mb_seg = mb_seg->next;
+		} while (mb_seg);
 
 		/* The last packet data descriptor needs End Of Packet (EOP) */
-		td_cmd |= IAVF_TX_DESC_CMD_EOP;
-		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
-		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
+
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
 
 		if (txq->nb_used >= txq->rs_thresh) {
 			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
 				   "%4u (port=%d queue=%d)",
-				   tx_last, txq->port_id, txq->queue_id);
+				   desc_idx_last, txq->port_id, txq->queue_id);
 
-			td_cmd |= IAVF_TX_DESC_CMD_RS;
+			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
 
 			/* Update txq RS bit counters */
 			txq->nb_used = 0;
 		}
 
-		txd->cmd_type_offset_bsz |=
-			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
-					 IAVF_TXD_QW1_CMD_SHIFT);
-		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
+				IAVF_TXD_DATA_QW1_CMD_SHIFT);
+
+		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
 	}
 
 end_of_tx:
 	rte_wmb();
 
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   txq->port_id, txq->queue_id, tx_id, nb_tx);
+		   txq->port_id, txq->queue_id, desc_idx, idx);
 
-	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
-	txq->tx_tail = tx_id;
+	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
+	txq->tx_tail = desc_idx;
 
-	return nb_tx;
+	return idx;
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f4ae2fd6e1..d05a525ef9 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -405,6 +405,112 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+
+#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
+#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
+#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
+#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
+	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
+	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT		(48)
+#define IAVF_TXD_DATA_QW1_L2TAG1_MASK		\
+	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
+	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
+	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT		(30)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK		\
+	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK		\
+	(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT		(50)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK		\
+	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT		(0)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_external_ip_type {
+	IAVF_TX_CTX_DESC_EIPT_NONE,
+	IAVF_TX_CTX_DESC_EIPT_IPV6,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM			(32)
+#define IAVF_TXD_CTX_QW0_L2TAG2_MASK			(0xFFFFUL)
+
+
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)
+
+/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
+#define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
+
+
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
@@ -555,9 +661,10 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	const volatile struct iavf_tx_desc *tx_desc = desc;
 	enum iavf_tx_desc_dtype_value type;
 
-	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
-		tx_desc->cmd_type_offset_bsz &
-		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+
+	type = (enum iavf_tx_desc_dtype_value)
+		rte_le_to_cpu_64(tx_desc->cmd_type_offset_bsz &
+			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
 	switch (type) {
 	case IAVF_TX_DESC_DTYPE_DATA:
 		name = "Tx_data_desc";
@@ -571,8 +678,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	}
 
 	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
-	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
-	       tx_desc->cmd_type_offset_bsz);
+		txq->queue_id, name, tx_id, tx_desc->buffer_addr,
+		tx_desc->cmd_type_offset_bsz);
 }
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index edb54991e2..2c3bb0b05f 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -363,10 +363,12 @@ static inline void
 flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
 		     const uint32_t *type_table)
 {
-	const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+	const __m128i ptype_mask = _mm_set_epi16(
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
+
 	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
 	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
 	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread
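
The reworked TX path above derives the context descriptor TSO fields directly
from the standard mbuf offload metadata. The sketch below shows the generic
mbuf setup that exercises that path (plain DPDK mbuf usage, not code from this
series).

/* Generic mbuf setup that drives the TSO handling in the reworked TX path
 * above (see iavf_fill_ctx_desc_segmentation_field). Not code from this
 * series.
 */
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

static void
request_tcp_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);
	m->tso_segsz = mss;

	/* IAVF_TX_CTX_DESC_TSO is selected when PKT_TX_TCP_SEG is set; the
	 * IP/TCP checksum flags request the corresponding data descriptor
	 * checksum offloads.
	 */
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
		       PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG;
}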

* [dpdk-dev] [PATCH v9 3/7] net/iavf: add support for asynchronous virt channel messages
  2021-10-18 10:10 ` [dpdk-dev] [PATCH v9 0/7] iavf: add iAVF IPsec " Radu Nicolau
  2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 1/7] common/iavf: " Radu Nicolau
  2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 2/7] net/iavf: rework tx path Radu Nicolau
@ 2021-10-18 10:10   ` Radu Nicolau
  2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-18 10:10 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for asynchronous virtual channel messages, specifically for
inline IPsec messages.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h       |  16 ++++
 drivers/net/iavf/iavf_vchnl.c | 138 +++++++++++++++++++++-------------
 2 files changed, 101 insertions(+), 53 deletions(-)
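
The key change in this patch is that a pending virtchnl command may have to
wait for more than one message: the usual admin queue response plus, for
inline IPsec operations, an additional asynchronous IPsec response. Simplified
from the diff below, the completion counting is roughly:

/* Conceptual sketch of the completion counting used below (simplified). */

/* issuing side: one expected message normally, two for async IPsec commands */
rte_atomic32_set(&vf->pend_cmd_count, async ? 2 : 1);

/* admin-queue handler: wake the issuer only when the last expected message
 * for the pending command has been received
 */
if (msg_opc == vf->pend_cmd) {
	rte_atomic32_dec(&vf->pend_cmd_count);
	if (rte_atomic32_read(&vf->pend_cmd_count) == 0)
		_notify_cmd(vf, msg_ret);
}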

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 34bfa9af47..67051f29a8 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -193,6 +193,7 @@ struct iavf_info {
 	uint64_t supported_rxdid;
 	uint8_t *proto_xtr; /* proto xtr type for all queues */
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+	rte_atomic32_t pend_cmd_count;
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
@@ -345,9 +346,24 @@ _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
+	rte_atomic32_set(&vf->pend_cmd_count, 1);
+
 	return !ret;
 }
 
+/* Check there is pending cmd in execution. If none, set new command. */
+static inline int
+_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+	int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+	if (!ret)
+		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+	rte_atomic32_set(&vf->pend_cmd_count, 2);
+
+	return !ret;
+}
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 0f4dd21d44..da4654957a 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -24,8 +24,8 @@
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
-#define MAX_TRY_TIMES 200
-#define ASQ_DELAY_MS  10
+#define MAX_TRY_TIMES 2000
+#define ASQ_DELAY_MS  1
 
 static uint32_t
 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
@@ -143,7 +143,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 }
 
 static int
-iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args,
+	int async)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
@@ -155,8 +156,14 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (vf->vf_reset)
 		return -EIO;
 
-	if (_atomic_set_cmd(vf, args->ops))
-		return -1;
+
+	if (async) {
+		if (_atomic_set_async_response_cmd(vf, args->ops))
+			return -1;
+	} else {
+		if (_atomic_set_cmd(vf, args->ops))
+			return -1;
+	}
 
 	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
@@ -252,9 +259,11 @@ static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
@@ -330,18 +339,40 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
 		case iavf_aqc_opc_send_msg_to_vf:
 			if (msg_opc == VIRTCHNL_OP_EVENT) {
 				iavf_handle_pf_event_msg(dev, info.msg_buf,
-							info.msg_len);
+						info.msg_len);
 			} else {
+				/* check for inline IPsec events */
+				struct inline_ipsec_msg *imsg =
+					(struct inline_ipsec_msg *)info.msg_buf;
+				struct rte_eth_event_ipsec_desc desc;
+				if (msg_opc == VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
+					&& imsg->ipsec_opcode ==
+						INLINE_IPSEC_OP_EVENT) {
+					struct virtchnl_ipsec_event *ev =
+							imsg->ipsec_data.event;
+					desc.subtype =
+						RTE_ETH_EVENT_IPSEC_UNKNOWN;
+					desc.metadata = ev->ipsec_event_data;
+					rte_eth_dev_callback_process(dev,
+							RTE_ETH_EVENT_IPSEC,
+							&desc);
+					return;
+				}
+
 				/* read message and it's expected one */
-				if (msg_opc == vf->pend_cmd)
-					_notify_cmd(vf, msg_ret);
-				else
-					PMD_DRV_LOG(ERR, "command mismatch,"
-						    "expect %u, get %u",
-						    vf->pend_cmd, msg_opc);
+				if (msg_opc == vf->pend_cmd) {
+					rte_atomic32_dec(&vf->pend_cmd_count);
+					if (rte_atomic32_read(
+						&vf->pend_cmd_count) == 0)
+						_notify_cmd(vf, msg_ret);
+				} else {
+					PMD_DRV_LOG(ERR,
+					"command mismatch, expect %u, get %u",
+						vf->pend_cmd, msg_opc);
+				}
 				PMD_DRV_LOG(DEBUG,
-					    "adminq response is received,"
-					    " opcode = %d", msg_opc);
+				"adminq response is received, opcode = %d",
+						msg_opc);
 			}
 			break;
 		default:
@@ -365,7 +396,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_ENABLE_VLAN_STRIPPING");
@@ -386,7 +417,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_DISABLE_VLAN_STRIPPING");
@@ -415,7 +446,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
 		return err;
@@ -468,12 +499,13 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
-		VIRTCHNL_VF_OFFLOAD_QOS;
+		VIRTCHNL_VF_OFFLOAD_QOS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -518,7 +550,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
@@ -562,7 +594,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_strip);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
@@ -602,7 +634,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_insert);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
@@ -645,7 +677,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(vlan_filter);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");
@@ -666,7 +698,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
@@ -697,7 +729,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES");
@@ -725,7 +757,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES");
@@ -758,7 +790,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
@@ -800,7 +832,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
@@ -844,7 +876,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
@@ -890,7 +922,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
@@ -922,7 +954,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
@@ -954,7 +986,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
@@ -1046,7 +1078,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
@@ -1087,7 +1119,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
 
@@ -1128,7 +1160,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
 
@@ -1188,7 +1220,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		args.in_args_size = len;
 		args.out_buffer = vf->aq_resp;
 		args.out_size = IAVF_AQ_BUF_SZ;
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		if (err)
 			PMD_DRV_LOG(ERR, "fail to execute command %s",
 				    add ? "OP_ADD_ETHER_ADDRESS" :
@@ -1215,7 +1247,7 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		*pstats = NULL;
@@ -1250,7 +1282,7 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1290,7 +1322,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
@@ -1317,7 +1349,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
@@ -1344,7 +1376,7 @@ iavf_fdir_add(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
 		return err;
@@ -1404,7 +1436,7 @@ iavf_fdir_del(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
 		return err;
@@ -1451,7 +1483,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
 		return err;
@@ -1492,7 +1524,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of %s",
@@ -1515,7 +1547,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
@@ -1541,7 +1573,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_SET_RSS_HENA");
@@ -1562,7 +1594,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1595,7 +1627,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_TC_MAP");
@@ -1640,7 +1672,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 		i * sizeof(struct virtchnl_ether_addr);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
@@ -1686,11 +1718,11 @@ iavf_request_queues(struct rte_eth_dev *dev, uint16_t num)
 		 * before iavf_read_msg_from_pf.
 		 */
 		rte_intr_disable(&pci_dev->intr_handle);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_intr_enable(&pci_dev->intr_handle);
 	} else {
 		rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
 				  iavf_dev_alarm_handler, dev);
 	}
@@ -1729,7 +1761,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
 		return err;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v9 4/7] net/iavf: add iAVF IPsec inline crypto support
  2021-10-18 10:10 ` [dpdk-dev] [PATCH v9 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (2 preceding siblings ...)
  2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
@ 2021-10-18 10:10   ` Radu Nicolau
  2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-18 10:10 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Ray Kinsella
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload for
ESP over UDP, and in conjunction with TSO for UDP and TCP flows.
Implement support for rte_security packet metadata.

Add definitions for the IPsec descriptors and extend the offload
support in the data and context descriptors accordingly.

Add support to the virtual channel mailbox for IPsec Crypto request
operations. IPsec Crypto requests receive an initial acknowledgement
from the physical function driver confirming receipt of the request,
followed by an asynchronous response with the success/failure of the
request, including any response data.

Add enhanced descriptor debugging.

Refactor the scalar Tx burst function to support integration of the
offload.
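
For context, below is a minimal application-side sketch of how this offload
is typically consumed through the generic rte_security API (session creation
followed by per-packet metadata). It is illustrative only and not part of
this patch; it assumes the DPDK 21.11-era rte_security call signatures, and
the example_* helper names, the port/mempool/xform arguments and the SPI
value are placeholders.

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

static struct rte_security_session *
example_create_inline_ipsec_session(uint16_t port_id,
	struct rte_crypto_sym_xform *aead_xform,
	struct rte_mempool *sess_mp, struct rte_mempool *sess_priv_mp)
{
	/* the security context is exposed by the ethdev (the iavf PMD here) */
	void *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);

	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 1000,	/* placeholder SPI */
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
		},
		.crypto_xform = aead_xform,	/* e.g. an AES-GCM AEAD xform */
	};

	if (sec_ctx == NULL)
		return NULL;

	return rte_security_session_create(sec_ctx, &conf,
			sess_mp, sess_priv_mp);
}

static inline void
example_prepare_tx_pkt(uint16_t port_id, struct rte_security_session *sess,
	struct rte_mbuf *m)
{
	void *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);

	/* mark the packet for inline IPsec offload ... */
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	/* ... and let the PMD fill its per-packet IPsec metadata */
	rte_security_set_pkt_metadata(sec_ctx, sess, m, NULL);
}

For SAs created with the ESN option, the application can pass a pointer to
the 64-bit sequence number as the last argument of
rte_security_set_pkt_metadata(); the PMD then stores the upper 32 bits in
the per-packet metadata (see iavf_ipsec_crypto_pkt_metadata_set below).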

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h                       |   10 +
 drivers/net/iavf/iavf_ethdev.c                |   41 +-
 drivers/net/iavf/iavf_generic_flow.c          |   15 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1895 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  202 +-
 drivers/net/iavf/iavf_rxtx.h                  |   93 +-
 drivers/net/iavf/iavf_vchnl.c                 |   30 +
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 13 files changed, 2817 insertions(+), 21 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 67051f29a8..e98c42ba08 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -221,6 +221,7 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+	struct iavf_parser_list ipsec_crypto_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
@@ -245,6 +246,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_IPV6_FLOW,
 	IAVF_PROTO_XTR_TCP,
 	IAVF_PROTO_XTR_IP_OFFSET,
+	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
 	IAVF_PROTO_XTR_MAX,
 };
 
@@ -256,11 +258,14 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 };
 
+struct iavf_security_ctx;
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
 	struct rte_eth_dev_data *dev_data;
 	struct iavf_info vf;
+	struct iavf_security_ctx *security_ctx;
 
 	bool rx_bulk_alloc_allowed;
 	/* For vector PMD */
@@ -279,6 +284,8 @@ struct iavf_adapter {
 	(&((struct iavf_adapter *)adapter)->vf)
 #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct iavf_adapter *)adapter)->hw)
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+	(((struct iavf_adapter *)adapter)->security_ctx)
 
 /* IAVF_VSI_TO */
 #define IAVF_VSI_TO_HW(vsi) \
@@ -421,5 +428,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			uint16_t size);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 18428049d8..f6e6ff4745 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -30,6 +30,7 @@
 #include "iavf_rxtx.h"
 #include "iavf_generic_flow.h"
 #include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
@@ -71,6 +72,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
 	[IAVF_PROTO_XTR_IP_OFFSET] = {
 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
 		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+		.param = {
+		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+		.ol_flag =
+			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
 };
 
 static int iavf_dev_configure(struct rte_eth_dev *dev);
@@ -938,6 +944,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	/* free the iAVF security device context and all related resources */
+	iavf_security_ctx_destroy(adapter);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -947,7 +956,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
@@ -990,6 +1001,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
 
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+	}
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -1748,6 +1764,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
 		{ "tcp",       IAVF_PROTO_XTR_TCP       },
 		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
 	};
 	uint32_t i;
 
@@ -1756,8 +1773,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 			return xtr_type_map[i].type;
 	}
 
-	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
-		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+			"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
 
 	return -1;
 }
@@ -2405,6 +2422,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		goto flow_init_err;
 	}
 
+	/** Check if the IPsec Crypto offload is supported and create
+	 *  security_ctx if it is.
+	 */
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		/* Initialize security_ctx only for primary process */
+		ret = iavf_security_ctx_create(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+			return ret;
+		}
+
+		ret = iavf_security_init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
+			return ret;
+		}
+	}
+
 	iavf_default_rss_disable(adapter);
 
 	return 0;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index b86d99e57d..8dfa549980 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1635,6 +1635,7 @@ iavf_flow_init(struct iavf_adapter *ad)
 	TAILQ_INIT(&vf->flow_list);
 	TAILQ_INIT(&vf->rss_parser_list);
 	TAILQ_INIT(&vf->dist_parser_list);
+	TAILQ_INIT(&vf->ipsec_crypto_parser_list);
 	rte_spinlock_init(&vf->flow_ops_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
@@ -1709,6 +1710,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
+		list = &vf->ipsec_crypto_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
@@ -2018,6 +2022,13 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 
 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
 				    actions, error);
+	if (*engine)
+		return 0;
+
+	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
+			pattern, actions, error);
+	if (*engine)
+		return 0;
 
 	if (!*engine) {
 		rte_flow_error_set(error, EINVAL,
@@ -2064,6 +2075,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	/* Special case for inline crypto egress flows */
+	if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY)
+		goto free_flow;
+
 	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
 			&engine, iavf_parse_engine_create, error);
 	if (ret < 0) {
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 4794d1fb80..a471c0331f 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -449,6 +449,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 /* engine types. */
 enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_NONE = 0,
+	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
 	IAVF_FLOW_ENGINE_MAX,
@@ -462,6 +463,7 @@ enum iavf_flow_engine_type {
  */
 enum iavf_flow_classification_stage {
 	IAVF_FLOW_STAGE_NONE = 0,
+	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
 	IAVF_FLOW_STAGE_RSS,
 	IAVF_FLOW_STAGE_DISTRIBUTOR,
 	IAVF_FLOW_STAGE_MAX,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
new file mode 100644
index 0000000000..f66f016a07
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -0,0 +1,1895 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+#include <rte_security_driver.h>
+#include <rte_security.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_log.h"
+#include "iavf_generic_flow.h"
+
+#include "iavf_ipsec_crypto.h"
+#include "iavf_ipsec_crypto_capabilities.h"
+
+/**
+ * iAVF IPsec Crypto Security Context
+ */
+struct iavf_security_ctx {
+	struct iavf_adapter *adapter;
+	int pkt_md_offset;
+	struct rte_cryptodev_capabilities *crypto_capabilities;
+};
+
+/**
+ * iAVF IPsec Crypto Security Session Parameters
+ */
+struct iavf_security_session {
+	struct iavf_adapter *adapter;
+
+	enum rte_security_ipsec_sa_mode mode;
+	enum rte_security_ipsec_tunnel_type type;
+	enum rte_security_ipsec_sa_direction direction;
+
+	struct {
+		uint32_t spi; /* Security Parameter Index */
+		uint32_t hw_idx; /* SA Index in hardware table */
+	} sa;
+
+	struct {
+		uint8_t enabled :1;
+		union {
+			uint64_t value;
+			struct {
+				uint32_t hi;
+				uint32_t low;
+			};
+		};
+	} esn;
+
+	struct {
+		uint8_t enabled :1;
+	} udp_encap;
+
+	size_t iv_sz;
+	size_t icv_sz;
+	size_t block_sz;
+
+	struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
+};
+/**
+ *  IV Length field in IPsec Tx Desc uses the following encoding:
+ *
+ *  0B - 0
+ *  4B - 1
+ *  8B - 2
+ *  16B - 3
+ *
+ * but we also need the IV length for TSO to correctly calculate the total
+ * header length, so it is also placed in the upper 6 bits here for easier
+ * retrieval.
+ */
+static inline uint8_t
+calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
+{
+	uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
+
+	switch (iv_sz) {
+	case 4:
+		iv_length = IAVF_IPSEC_IV_LEN_DW;
+		break;
+	case 8:
+		iv_length = IAVF_IPSEC_IV_LEN_DDW;
+		break;
+	case 16:
+		iv_length = IAVF_IPSEC_IV_LEN_QDW;
+		break;
+	}
+
+	return (iv_sz << 2) | iv_length;
+}
+
+static unsigned int
+iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
+{
+	return sizeof(struct iavf_security_session);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_capability(struct iavf_security_ctx *iavf_sctx,
+	uint32_t algo, uint32_t type)
+{
+	const struct rte_cryptodev_capabilities *capability;
+	int i = 0;
+
+	capability = &iavf_sctx->crypto_capabilities[i];
+
+	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+			capability->sym.xform_type == type &&
+			capability->sym.cipher.algo == algo)
+			return &capability->sym;
+		/** try next capability */
+		capability = &iavf_crypto_capabilities[i++];
+	}
+
+	return NULL;
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_auth_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
+}
+static const struct rte_cryptodev_symmetric_capability *
+get_aead_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
+}
+
+static uint16_t
+get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->auth.block_size;
+}
+
+static uint8_t
+calc_context_desc_cipherblock_sz(size_t len)
+{
+	switch (len) {
+	case 8:
+		return 0x2;
+	case 16:
+		return 0x3;
+	default:
+		return 0x0;
+	}
+}
+
+static int
+valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
+{
+	if (len < min || len > max)
+		return false;
+
+	if (increment == 0)
+		return true;
+
+	if ((len - min) % increment)
+		return false;
+
+	/* make sure it fits in the key array */
+	if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
+		return false;
+
+	return true;
+}
+
+static int
+valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_auth_xform *auth)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, auth->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(auth->key.length,
+		capability->auth.key_size.min,
+		capability->auth.key_size.max,
+		capability->auth.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_cipher_xform *cipher)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, cipher->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(cipher->key.length,
+		capability->cipher.key_size.min,
+		capability->cipher.key_size.max,
+		capability->cipher.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_aead_xform *aead)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, aead->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(aead->key.length,
+		capability->aead.key_size.min,
+		capability->aead.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
+	struct rte_security_session_conf *conf)
+{
+	/** validate security action/protocol selection */
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
+		PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate IPsec protocol selection */
+	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate selected options */
+	if (conf->ipsec.options.copy_dscp ||
+		conf->ipsec.options.copy_flabel ||
+		conf->ipsec.options.copy_df ||
+		conf->ipsec.options.dec_ttl ||
+		conf->ipsec.options.ecn ||
+		conf->ipsec.options.stats) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+		return -EINVAL;
+	}
+
+	/**
+	 * Validate crypto xforms parameters.
+	 *
+	 * AEAD transforms can be used for either inbound/outbound IPsec SAs,
+	 * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
+	 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
+	 */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_auth_xform(iavf_sctx,
+				&conf->crypto_xform->next->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->next->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_aead_xform *aead, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AEAD;
+
+	switch (aead->algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		cfg->algo_type = VIRTCHNL_AES_CCM; break;
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		cfg->algo_type = VIRTCHNL_AES_GCM; break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
+		break;
+	}
+
+	cfg->key_len = aead->key.length;
+	cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt len */
+	cfg->digest_len = aead->digest_length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, aead->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_cipher_xform *cipher, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_CIPHER;
+
+	switch (cipher->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cfg->algo_type = VIRTCHNL_AES_CBC; break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cfg->algo_type = VIRTCHNL_3DES_CBC; break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cfg->algo_type = VIRTCHNL_AES_CTR;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid cipher parameters");
+		break;
+	}
+
+	cfg->key_len = cipher->key.length;
+	cfg->iv_len = cipher->iv.length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_auth_xform *auth, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AUTH;
+
+	switch (auth->algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+		cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		cfg->algo_type = VIRTCHNL_AES_CMAC; break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		cfg->algo_type = VIRTCHNL_AES_GMAC;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid auth parameters");
+		break;
+	}
+
+	cfg->key_len = auth->key.length;
+	/* special case for RTE_CRYPTO_AUTH_AES_GMAC */
+	if (auth->algo == RTE_CRYPTO_AUTH_AES_GMAC)
+		cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt */
+	else
+		cfg->iv_len = auth->iv.length;
+	cfg->digest_len = auth->digest_length;
+
+	memcpy(cfg->key_data, auth->key.data, cfg->key_len);
+}
+
+/**
+ * Send SA add virtual channel request to Inline IPsec driver.
+ *
+ * The inline IPsec driver expects the SPI and destination IP address to be
+ * in host order, but the DPDK APIs use network order, therefore we need to
+ * do an htonl conversion of these parameters.
+ */
+static uint32_t
+iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
+	struct rte_security_session_conf *conf)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	struct virtchnl_ipsec_sa_cfg *sa_cfg;
+	size_t request_len, response_len;
+
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg);
+
+	request = rte_malloc("iavf-sad-add-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
+	response = rte_malloc("iavf-sad-add-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set SA configuration params */
+	sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
+
+	sa_cfg->spi = conf->ipsec.spi;
+	sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
+	sa_cfg->virtchnl_direction =
+		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+			VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
+
+	if (conf->ipsec.options.esn) {
+		sa_cfg->esn_enabled = 1;
+		sa_cfg->esn_hi = conf->ipsec.esn.hi;
+		sa_cfg->esn_low = conf->ipsec.esn.low;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sa_cfg->udp_encap_enabled = 1;
+
+	/* Set outer IP params */
+	if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
+
+		*((uint32_t *)sa_cfg->dst_addr)	=
+			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
+	} else {
+		uint32_t *v6_dst_addr =
+			conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
+
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
+
+		((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
+		((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
+		((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
+		((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
+	}
+
+	/* set crypto params */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->aead, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->cipher, conf->ipsec.salt);
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->auth, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->auth, conf->ipsec.salt);
+		if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
+			sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->cipher, conf->ipsec.salt);
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sa_cfg_resp->sa_handle;
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static void
+set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
+	struct iavf_security_session *sess)
+{
+	template->sa_idx = sess->sa.hw_idx;
+
+	template->ol_flags = 0;
+
+	if (sess->udp_encap.enabled)
+		template->ol_flags |= IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
+
+	if (sess->esn.enabled)
+		template->ol_flags |= IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
+
+	template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
+	template->ctx_desc_ipsec_params =
+			calc_context_desc_cipherblock_sz(sess->block_sz) |
+			((uint8_t)(sess->icv_sz >> 2) << 3);
+}
+
+static void
+set_session_parameter(struct iavf_security_ctx *iavf_sctx,
+	struct iavf_security_session *sess,
+	struct rte_security_session_conf *conf, uint32_t sa_idx)
+{
+	sess->adapter = iavf_sctx->adapter;
+
+	sess->mode = conf->ipsec.mode;
+	sess->direction = conf->ipsec.direction;
+
+	if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		sess->type = conf->ipsec.tunnel.type;
+
+	sess->sa.spi = conf->ipsec.spi;
+	sess->sa.hw_idx = sa_idx;
+
+	if (conf->ipsec.options.esn) {
+		sess->esn.enabled = 1;
+		sess->esn.value = conf->ipsec.esn.value;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sess->udp_encap.enabled = 1;
+
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sess->block_sz = get_aead_blocksize(iavf_sctx,
+			conf->crypto_xform->aead.algo);
+		sess->iv_sz = sizeof(uint64_t); /* iv.length includes salt */
+		sess->icv_sz = conf->crypto_xform->aead.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sess->block_sz = get_cipher_blocksize(iavf_sctx,
+			conf->crypto_xform->cipher.algo);
+		sess->iv_sz = conf->crypto_xform->cipher.iv.length;
+		sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			sess->block_sz = get_auth_blocksize(iavf_sctx,
+				conf->crypto_xform->auth.algo);
+			sess->iv_sz = conf->crypto_xform->auth.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		} else {
+			sess->block_sz = get_cipher_blocksize(iavf_sctx,
+				conf->crypto_xform->next->cipher.algo);
+			sess->iv_sz =
+				conf->crypto_xform->next->cipher.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		}
+	}
+
+	set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
+}
+
+/**
+ * Create IPsec Security Association for inline IPsec Crypto offload.
+ *
+ * 1. validate session configuration parameters
+ * 2. allocate session memory from mempool
+ * 3. add SA to hardware database
+ * 4. set session parameters
+ * 5. create packet metadata template for datapath
+ */
+static int
+iavf_ipsec_crypto_session_create(void *device,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *session,
+				 struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_session = NULL;
+	int sa_idx;
+	int ret = 0;
+
+	/* validate that all SA parameters are valid for device */
+	ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
+	if (ret)
+		return ret;
+
+	/* allocate session context */
+	if (rte_mempool_get(mempool, (void **)&iavf_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
+		return -ENOMEM;
+	}
+
+	/* add SA to hardware database */
+	sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
+	if (sa_idx < 0) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add SA (spi: %d, mode: %s, direction: %s)",
+			conf->ipsec.spi,
+			conf->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
+				"transport" : "tunnel",
+			conf->ipsec.direction ==
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+				"inbound" : "outbound");
+
+		rte_mempool_put(mempool, iavf_session);
+		return -EFAULT;
+	}
+
+	/* save data plane required session parameters */
+	set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
+
+	/* save to security session private data */
+	set_sec_session_private_data(session, iavf_session);
+
+	return 0;
+}
+
+/**
+ * Check if valid ipsec crypto action.
+ * SPI must be non-zero and SPI in session must match SPI value
+ * passed into function.
+ *
+ * returns: 0 if the session is invalid or the SPI value equals zero
+ * returns: 1 if valid
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_session *sess = session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(sess == NULL || sess->adapter != adapter))
+		return false;
+
+	/* SPI value must be non-zero */
+	if (spi == 0)
+		return false;
+	/* Session SPI must match flow SPI */
+	else if (sess->sa.spi == spi) {
+		return true;
+		/**
+		 * TODO: We should add a way of tracking valid hw SA indices to
+		 * make validation less brittle
+		 */
+	}
+
+	return true;
+}
+
+/**
+ * Send virtual channel security policy add request to IES driver.
+ *
+ * The IES driver expects the SPI and destination IP address to be in host
+ * order, but the DPDK APIs use network order, therefore we need to do an
+ * htonl conversion of these parameters.
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg);
+	request = rte_malloc("iavf-inbound-security-policy-add-request",
+				request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* ESP SPI */
+	request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
+
+	/* Destination IP  */
+	if (is_v4) {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
+		request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
+	} else {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+		request->ipsec_data.sp_cfg->dip[0] =
+				htonl(((uint32_t *)v6_dst_addr)[0]);
+		request->ipsec_data.sp_cfg->dip[1] =
+				htonl(((uint32_t *)v6_dst_addr)[1]);
+		request->ipsec_data.sp_cfg->dip[2] =
+				htonl(((uint32_t *)v6_dst_addr)[2]);
+		request->ipsec_data.sp_cfg->dip[3] =
+				htonl(((uint32_t *)v6_dst_addr)[3]);
+	}
+
+	request->ipsec_data.sp_cfg->drop = drop;
+
+	/** Traffic Class/Congestion Domain currently not supported */
+	request->ipsec_data.sp_cfg->set_tc = 0;
+	request->ipsec_data.sp_cfg->cgd = 0;
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
+	response = rte_malloc("iavf-inbound-security-policy-add-response",
+				response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* send virtual channel request to add the security policy to hardware */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sp_cfg_resp->rule_id;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_update);
+	request = rte_malloc("iavf-sa-update-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sa-update-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set request params */
+	request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
+	request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
+
+	/* send virtual channel request to update the SA ESN in hardware */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_resp->resp;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_update(void *device,
+		struct rte_security_session *session,
+		struct rte_security_session_conf *conf)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int rc = 0;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* update esn hi 32-bits */
+	if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
+		/**
+		 * Update ESN in hardware for inbound SA. Store in
+		 * iavf_security_session for outbound SA for use
+		 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
+		 */
+		if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+			rc = iavf_ipsec_crypto_sa_update_esn(adapter,
+					iavf_sess);
+		else
+			iavf_sess->esn.hi = conf->ipsec.esn.hi;
+	}
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
+		struct rte_security_session *session __rte_unused,
+		struct rte_security_stats *stats __rte_unused)
+{
+	return -EOPNOTSUPP;
+}
+
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_destroy);
+	request = rte_malloc("iavf-sp-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sp-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set security policy params */
+	request->ipsec_data.sp_destroy->table_id = is_v4 ?
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+	request->ipsec_data.sp_destroy->rule_id = flow_id;
+
+	/* send virtual channel request to delete the security policy */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		return response->ipsec_data.ipsec_status->status;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_destroy);
+
+	request = rte_malloc("iavf-sa-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+
+	response = rte_malloc("iavf-sa-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/**
+	 * SA delete supports deletion of 1-8 specified SAs, or, if the flag
+	 * field is zero, all SAs associated with the VF will be deleted.
+	 */
+	if (sess) {
+		request->ipsec_data.sa_destroy->flag = 0x1;
+		request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
+	} else {
+		request->ipsec_data.sa_destroy->flag = 0x0;
+	}
+
+	/* send virtual channel request to delete the SA from hardware */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+
+	/**
+	 * Delete status will be the same bitmask as the sa_destroy request
+	 * flag if the delete is successful
+	 */
+	if (request->ipsec_data.sa_destroy->flag !=
+			response->ipsec_data.ipsec_status->status)
+		rc = -EFAULT;
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_destroy(void *device,
+		struct rte_security_session *session)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int ret;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
+	rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
+	return ret;
+}
+
+/**
+ * Get ESP trailer from packet as well as calculate the total ESP trailer
+ * length, which includes the padding, the ESP trailer footer and the ICV
+ */
+static inline struct rte_esp_tail *
+iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t *esp_trailer_length)
+{
+	struct rte_esp_tail *esp_trailer;
+
+	uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
+	uint16_t offset = 0;
+
+	/**
+	 * The ICV will not be present in TSO packets as this is appended by
+	 * hardware during segment generation
+	 */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		length -=  s->icv_sz;
+
+	*esp_trailer_length = length;
+
+	/**
+	 * Calculate the offset in the packet to the ESP trailer header; this
+	 * should be the total packet length less the size of the ESP trailer
+	 * plus the ICV length, if present
+	 */
+	offset = rte_pktmbuf_pkt_len(m) - length;
+
+	if (m->nb_segs > 1) {
+		/* find segment which esp trailer is located */
+		while (m->data_len < offset) {
+			offset -= m->data_len;
+			m = m->next;
+		}
+	}
+
+	esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
+
+	*esp_trailer_length += esp_trailer->pad_len;
+
+	return esp_trailer;
+}
+
+static inline uint16_t
+iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t esp_tlen)
+{
+	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
+	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
+	uint16_t ol4_len = 0;		/* UDP NATT */
+	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
+	uint16_t l4_len = 0;		/* TCP/UDP/STCP hdrs */
+	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
+
+	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ol3_len = m->outer_l3_len;
+		/**<
+		 * application provided l3len assumed to include length of
+		 * ipv4/6 hdr + ext hdrs
+		 */
+
+	if (s->udp_encap.enabled)
+		ol4_len = sizeof(struct rte_udp_hdr);
+
+	l3_len = m->l3_len;
+	l4_len = m->l4_len;
+
+	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
+			esp_hlen + l3_len + l4_len + esp_tlen);
+}
+
+static int
+iavf_ipsec_crypto_pkt_metadata_set(void *device,
+			 struct rte_security_session *session,
+			 struct rte_mbuf *m, void *params)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_sess = session->sess_private_data;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
+	struct rte_esp_tail *esp_tail;
+	uint64_t *sqn = params;
+	uint16_t esp_trailer_length;
+
+	/* Check that the session is valid and associated with this device */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* Get dynamic metadata location from mbuf */
+	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
+		struct iavf_ipsec_crypto_pkt_metadata *);
+
+	/* Set immutable metadata values from the session template */
+	memcpy(md, &iavf_sess->pkt_metadata_template,
+		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
+
+	esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
+			&esp_trailer_length);
+
+	/* Set per packet mutable metadata values */
+	md->esp_trailer_len = esp_trailer_length;
+	md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
+				iavf_sess, esp_trailer_length);
+	md->next_proto = esp_tail->next_proto;
+
+	/* If Extended SN in use set the upper 32-bits in metadata */
+	if (iavf_sess->esn.enabled && sqn != NULL)
+		md->esn = (uint32_t)(*sqn >> 32);
+
+	return 0;
+}
+
+static int
+iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
+		struct virtchnl_ipsec_cap *capability)
+{
+	/* Perform pf-vf comms */
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg);
+
+	request = rte_malloc("iavf-device-capability-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_cap);
+	response = rte_malloc("iavf-device-capability-response",
+			response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* send virtual channel request to get the device capabilities */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id){
+		rc = -EFAULT;
+		goto update_cleanup;
+	}
+	memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+enum rte_crypto_auth_algorithm auth_maptbl[] = {
+	/* Hash Algorithm */
+	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
+	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
+	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
+	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
+	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
+	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
+	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
+	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
+	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
+	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+};
+
+static void
+update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
+		struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	capability->auth.algo = auth_maptbl[acap->algo_type];
+	capability->auth.block_size = acap->block_size;
+
+	capability->auth.key_size.min = acap->min_key_size;
+	capability->auth.key_size.max = acap->max_key_size;
+	capability->auth.key_size.increment = acap->inc_key_size;
+
+	capability->auth.digest_size.min = acap->min_digest_size;
+	capability->auth.digest_size.max = acap->max_digest_size;
+	capability->auth.digest_size.increment = acap->inc_digest_size;
+}
+
+enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
+	/* Cipher Algorithm */
+	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
+	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
+	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
+	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
+};
+
+static void
+update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+	capability->cipher.algo = cipher_maptbl[acap->algo_type];
+
+	capability->cipher.block_size = acap->block_size;
+
+	capability->cipher.key_size.min = acap->min_key_size;
+	capability->cipher.key_size.max = acap->max_key_size;
+	capability->cipher.key_size.increment = acap->inc_key_size;
+
+	capability->cipher.iv_size.min = acap->min_iv_size;
+	capability->cipher.iv_size.max = acap->max_iv_size;
+	capability->cipher.iv_size.increment = acap->inc_iv_size;
+}
+
+enum rte_crypto_aead_algorithm aead_maptbl[] = {
+	/* AEAD Algorithm */
+	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
+	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
+	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+};
+
+static void
+update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	capability->aead.algo = aead_maptbl[acap->algo_type];
+
+	capability->aead.block_size = acap->block_size;
+
+	capability->aead.key_size.min = acap->min_key_size;
+	capability->aead.key_size.max = acap->max_key_size;
+	capability->aead.key_size.increment = acap->inc_key_size;
+
+	capability->aead.aad_size.min = acap->min_aad_size;
+	capability->aead.aad_size.max = acap->max_aad_size;
+	capability->aead.aad_size.increment = acap->inc_aad_size;
+
+	capability->aead.iv_size.min = acap->min_iv_size;
+	capability->aead.iv_size.max = acap->max_iv_size;
+	capability->aead.iv_size.increment = acap->inc_iv_size;
+
+	capability->aead.digest_size.min = acap->min_digest_size;
+	capability->aead.digest_size.max = acap->max_digest_size;
+	capability->aead.digest_size.increment = acap->inc_digest_size;
+}
+
+/**
+ * Dynamically set crypto capabilities based on the virtchnl IPsec
+ * capabilities structure.
+ */
+int
+iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
+{
+	struct rte_cryptodev_capabilities *capabilities;
+	int i, j, number_of_capabilities = 0, ci = 0;
+
+	/* Count the total number of crypto algorithms supported */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
+		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
+
+	/**
+	 * Allocate cryptodev capabilities structure for
+	 * *number_of_capabilities* items plus one item to null terminate the
+	 * array
+	 */
+	capabilities = rte_zmalloc("crypto_cap",
+		sizeof(struct rte_cryptodev_capabilities) *
+		(number_of_capabilities + 1), 0);
+	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
+
+	/**
+	 * Iterate over each virtchnl crypto capability by crypto type and
+	 * algorithm.
+	 */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
+		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
+			switch (vch_cap->cap[i].crypto_type) {
+			case VIRTCHNL_AUTH:
+				update_auth_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_CIPHER:
+				update_cipher_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_AEAD:
+				update_aead_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			default:
+				capabilities[ci].op =
+						RTE_CRYPTO_OP_TYPE_UNDEFINED;
+				break;
+			}
+		}
+	}
+
+	iavf_sctx->crypto_capabilities = capabilities;
+	return 0;
+}
+
+/**
+ * Get security capabilities for device
+ */
+static const struct rte_security_capability *
+iavf_ipsec_crypto_capabilities_get(void *device)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	unsigned int i;
+
+	static struct rte_security_capability iavf_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 }
+			},
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	/**
+	 * Update the security capabilities struct with the runtime-discovered
+	 * crypto capabilities, except for the last element of the array, which
+	 * is the null termination.
+	 */
+	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
+			sizeof(iavf_security_capabilities[0])) - 1); i++) {
+		iavf_security_capabilities[i].crypto_capabilities =
+			iavf_sctx->crypto_capabilities;
+	}
+
+	return iavf_security_capabilities;
+}
+
+static struct rte_security_ops iavf_ipsec_crypto_ops = {
+	.session_get_size		= iavf_ipsec_crypto_session_size_get,
+	.session_create			= iavf_ipsec_crypto_session_create,
+	.session_update			= iavf_ipsec_crypto_session_update,
+	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
+	.session_destroy		= iavf_ipsec_crypto_session_destroy,
+	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
+	.get_userdata			= NULL,
+	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
+};
+
+int
+iavf_security_ctx_create(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx;
+
+	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
+	if (sctx == NULL)
+		return -ENOMEM;
+
+	sctx->device = adapter->vf.eth_dev;
+	sctx->ops = &iavf_ipsec_crypto_ops;
+	sctx->sess_cnt = 0;
+
+	adapter->vf.eth_dev->security_ctx = sctx;
+
+	if (adapter->security_ctx == NULL) {
+		adapter->security_ctx = rte_malloc("iavf_security_ctx",
+				sizeof(struct iavf_security_ctx), 0);
+		if (adapter->security_ctx == NULL)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int
+iavf_security_init(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct rte_mbuf_dynfield pkt_md_dynfield = {
+		.name = "iavf_ipsec_crypto_pkt_metadata",
+		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
+		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
+	};
+	struct virtchnl_ipsec_cap capabilities;
+	int rc;
+
+	iavf_sctx->adapter = adapter;
+
+	iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
+	if (iavf_sctx->pkt_md_offset < 0)
+		return iavf_sctx->pkt_md_offset;
+
+	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
+	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
+	if (rc)
+		return rc;
+
+	return	iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
+			&capabilities);
+}
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	return iavf_sctx->pkt_md_offset;
+}
+
+int
+iavf_security_ctx_destroy(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx  = adapter->vf.eth_dev->security_ctx;
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	if (iavf_sctx == NULL)
+		return -ENODEV;
+
+	/* TODO: Add resources cleanup */
+
+	/* free and reset security data structures */
+	rte_free(iavf_sctx);
+	rte_free(sctx);
+
+	iavf_sctx = NULL;
+	sctx = NULL;
+
+	return 0;
+}
+
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
+{
+	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
+
+	/** Capability check for IPsec Crypto */
+	if (resources && (resources->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
+		return true;
+
+	return false;
+}
+
+#define IAVF_IPSEC_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
+enum iavf_ipsec_flow_pt_type {
+	IAVF_PATTERN_ESP = 1,
+	IAVF_PATTERN_AH,
+	IAVF_PATTERN_UDP_ESP,
+};
+enum iavf_ipsec_flow_pt_ip_ver {
+	IAVF_PATTERN_IPV4 = 1,
+	IAVF_PATTERN_IPV6,
+};
+
+#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
+#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
+#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
+
+static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
+	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
+};
+
+struct iavf_ipsec_flow_item {
+	uint64_t id;
+	uint8_t is_ipv4;
+	uint32_t spi;
+	struct rte_ether_hdr eth_hdr;
+	union {
+		struct rte_ipv4_hdr ipv4_hdr;
+		struct rte_ipv6_hdr ipv6_hdr;
+	};
+	struct rte_udp_hdr udp_hdr;
+};
+
+static void
+parse_eth_item(const struct rte_flow_item_eth *item,
+		struct rte_ether_hdr *eth)
+{
+	memcpy(eth->src_addr.addr_bytes,
+			item->src.addr_bytes, sizeof(eth->src_addr));
+	memcpy(eth->dst_addr.addr_bytes,
+			item->dst.addr_bytes, sizeof(eth->dst_addr));
+}
+
+static void
+parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
+		struct rte_ipv4_hdr *ipv4)
+{
+	ipv4->src_addr = item->hdr.src_addr;
+	ipv4->dst_addr = item->hdr.dst_addr;
+}
+
+static void
+parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
+		struct rte_ipv6_hdr *ipv6)
+{
+	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
+	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+}
+
+static void
+parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
+{
+	udp->dst_port = item->hdr.dst_port;
+	udp->src_port = item->hdr.src_port;
+}
+
+static int
+has_security_action(const struct rte_flow_action actions[],
+	const void **session)
+{
+	/* only {SECURITY; END} supported */
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
+		*session = actions[0].conf;
+		return true;
+	}
+	return false;
+}
+
+static struct iavf_ipsec_flow_item *
+iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t type)
+{
+	const void *session;
+	struct iavf_ipsec_flow_item
+		*ipsec_flow = rte_malloc("security-flow-rule",
+		sizeof(struct iavf_ipsec_flow_item), 0);
+	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
+	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
+
+	if (ipsec_flow == NULL)
+		return NULL;
+
+	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
+
+	if (pattern[0].spec)
+		parse_eth_item((const struct rte_flow_item_eth *)
+				pattern[0].spec, &ipsec_flow->eth_hdr);
+
+	switch (p_type) {
+	case IAVF_PATTERN_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[2].spec)->hdr.spi;
+		break;
+	case IAVF_PATTERN_AH:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_ah *)
+					pattern[2].spec)->spi;
+		break;
+	case IAVF_PATTERN_UDP_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		parse_udp_item((const struct rte_flow_item_udp *)
+				pattern[2].spec,
+			&ipsec_flow->udp_hdr);
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[3].spec)->hdr.spi;
+		break;
+	default:
+		goto flow_cleanup;
+	}
+
+	if (!has_security_action(actions, &session))
+		goto flow_cleanup;
+
+	if (!iavf_ipsec_crypto_action_valid(ethdev, session,
+			ipsec_flow->spi))
+		goto flow_cleanup;
+
+	return ipsec_flow;
+
+flow_cleanup:
+	rte_free(ipsec_flow);
+	return NULL;
+}
+
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser;
+
+static int
+iavf_ipsec_flow_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
+		parser = &iavf_ipsec_flow_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
+}
+
+static int
+iavf_ipsec_flow_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = meta;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	if (ipsec_flow->is_ipv4) {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			1,
+			ipsec_flow->ipv4_hdr.dst_addr,
+			NULL,
+			0);
+	} else {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			0,
+			0,
+			ipsec_flow->ipv6_hdr.dst_addr,
+			0);
+	}
+
+	if (ipsec_flow->id < 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Failed to add SA.");
+		return -rte_errno;
+	}
+
+	flow->rule = ipsec_flow;
+
+	return 0;
+}
+
+static int
+iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	iavf_ipsec_crypto_security_policy_delete(ad,
+			ipsec_flow->is_ipv4, ipsec_flow->id);
+	rte_free(ipsec_flow);
+	return 0;
+}
+
+static struct iavf_flow_engine iavf_ipsec_flow_engine = {
+	.init = iavf_ipsec_flow_init,
+	.uninit = iavf_ipsec_flow_uninit,
+	.create = iavf_ipsec_flow_create,
+	.destroy = iavf_ipsec_flow_destroy,
+	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
+};
+
+static int
+iavf_ipsec_flow_parse(struct iavf_adapter *ad,
+		       struct iavf_pattern_match_item *array,
+		       uint32_t array_len,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta,
+		       struct rte_flow_error *error)
+{
+	struct iavf_pattern_match_item *item = NULL;
+	int ret = -1;
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (item && item->meta) {
+		uint32_t type = (uint64_t)(item->meta);
+		struct iavf_ipsec_flow_item *fi =
+				iavf_ipsec_flow_item_parse(ad->vf.eth_dev,
+						pattern, actions, type);
+		if (fi && meta) {
+			*meta = fi;
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser = {
+	.engine = &iavf_ipsec_flow_engine,
+	.array = iavf_ipsec_flow_pattern,
+	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
+	.parse_pattern_action = iavf_ipsec_flow_parse,
+	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
+};
+
+RTE_INIT(iavf_ipsec_flow_engine_register)
+{
+	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
+}
+
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
new file mode 100644
index 0000000000..4e4c8798ec
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_H_
+#define _IAVF_IPSEC_CRYPTO_H_
+
+#include <rte_security.h>
+
+#include "iavf.h"
+
+
+
+struct iavf_tx_ipsec_desc {
+	union {
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le16 l4payload_length;
+			__le32 esn;
+			__le16 trailer_length;
+			u8 type:4;
+			u8 rsv:1;
+			u8 udp:1;
+			u8 ivlen:2;
+			u8 next_header;
+			__le16 ipv6_ext_hdr_length;
+			__le32 said;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
+			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
+
+/* Initialization Vector Length type */
+enum iavf_ipsec_iv_len {
+	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
+	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
+	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
+	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
+};
+
+
+/* IPsec Crypto Packet Metadata offload flags */
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
+
+/**
+ * Packet metadata structure used to hold the parameters required by the iAVF
+ * transmit data path. Parameters are set per packet by calling the
+ * rte_security_set_pkt_metadata() API.
+ */
+struct iavf_ipsec_crypto_pkt_metadata {
+	uint32_t sa_idx;                /* SA hardware index (20b/4B) */
+
+	uint8_t ol_flags;		/* flags (1B) */
+	uint8_t len_iv;			/* IV length (2b/1B) */
+	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
+	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
+
+	uint16_t l4_payload_len;	/* L4 payload length */
+	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extension headers len (5b/1B) */
+	uint8_t next_proto;		/* Next Protocol (8b/1B) */
+
+	uint32_t esn;		        /* Extended Sequence Number (32b/4B) */
+} __rte_packed;
+
+/**
+ * Check whether inline IPsec Crypto offload is supported
+ */
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
+
+/**
+ * Create security context
+ */
+int iavf_security_ctx_create(struct iavf_adapter *adapter);
+
+/**
+ * Initialize security context
+ */
+int iavf_security_init(struct iavf_adapter *adapter);
+
+/**
+ * Set security capabilities
+ */
+int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchl_capabilities);
+
+
+int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+/**
+ * Destroy security context
+ */
+int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
+
+/**
+ * Verify that the inline IPsec Crypto action is valid for this device
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi);
+
+/**
+ * Add inbound security policy rule to hardware
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop);
+
+/**
+ * Delete inbound security policy rule from hardware
+ */
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id);
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+#endif /* _IAVF_IPSEC_CRYPTO_H_ */
diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
new file mode 100644
index 0000000000..70ce8dd638
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC MAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 },
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* ChaCha20-Poly1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 16,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	}
+};
+
+
+#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 11b7fea36f..995ec79475 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -27,6 +27,7 @@
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
+#include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
@@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 uint8_t
 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
@@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
 		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
 		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
 		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
+				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
 	};
 
 	return flex_type < RTE_DIM(rxdid_map) ?
@@ -508,6 +512,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields =
 			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
+	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+		rxq->xtr_ol_flag =
+			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+		rxq->rxd_to_pkt_fields =
+			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
 	case IAVF_RXDID_COMMS_OVS_1:
 		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
 		break;
@@ -692,6 +702,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		       const struct rte_eth_txconf *tx_conf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
@@ -736,9 +748,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		struct virtchnl_vlan_supported_caps *insertion_support =
-			&vf->vlan_v2_caps.offloads.insertion_support;
+			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
 		uint32_t insertion_cap;
 
 		if (insertion_support->outer)
@@ -762,6 +774,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
+	if (iavf_ipsec_crypto_supported(adapter))
+		txq->ipsec_crypto_pkt_md_offset =
+			iavf_security_get_pkt_md_offset(adapter);
+
 	/* Allocate software ring */
 	txq->sw_ring =
 		rte_zmalloc_socket("iavf tx sw ring",
@@ -1081,6 +1097,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
 #endif
 }
 
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
+
+	mb->dynfield1[0] = desc->ipsec_said &
+			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
+}
+
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp,
+			  struct iavf_ipsec_crypto_stats *stats)
+{
+	uint16_t status1 = rte_le_to_cpu_16(rxdp->wb.status_error1);
+
+	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
+		uint16_t ipsec_status;
+
+		mb->ol_flags |= PKT_RX_SEC_OFFLOAD;
+
+		ipsec_status = status1 &
+			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
+
+
+		if (unlikely(ipsec_status !=
+			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
+			mb->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+
+			switch (ipsec_status) {
+			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
+				stats->ierrors.sad_miss++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
+				stats->ierrors.not_processed++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
+				stats->ierrors.icv_check++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
+				stats->ierrors.ipsec_length++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
+				stats->ierrors.misc++;
+				break;
+			}
+
+			stats->ierrors.count++;
+			return;
+		}
+
+		stats->icount++;
+		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
+
+		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
+			ipsec_status !=
+				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
+			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
+	}
+}
+
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
@@ -1399,6 +1479,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1541,6 +1623,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1779,6 +1863,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+				&rxq->stats.ipsec_crypto);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2091,6 +2177,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
 	*field |= cmd;
 }
 
+static inline void
+iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+{
+	uint64_t ipsec_field =
+		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+
+	*field |= ipsec_field;
+}
+
+
 static inline void
 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 		const struct rte_mbuf *m)
@@ -2123,15 +2221,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 
 static inline uint16_t
 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
-	struct rte_mbuf *m)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
 	uint64_t segmentation_field = 0;
 	uint64_t total_length = 0;
 
-	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD) {
+		total_length = ipsec_md->l4_payload_len;
+	} else {
+		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
-	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
-		total_length -= m->outer_l3_len;
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			total_length -= m->outer_l3_len;
+	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
 	if (!m->l4_len || !m->tso_segsz)
@@ -2160,7 +2262,8 @@ struct iavf_tx_context_desc_qws {
 
 static inline void
 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
-	struct rte_mbuf *m, uint16_t *tlen)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
+	uint16_t *tlen)
 {
 	volatile struct iavf_tx_context_desc_qws *desc_qws =
 			(volatile struct iavf_tx_context_desc_qws *)desc;
@@ -2172,8 +2275,13 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 
 	/* fill segmentation field */
 	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		/* fill IPsec field */
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
+				ipsec_md);
+
 		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
-				m);
+				m, ipsec_md);
 	}
 
 	/* fill tunnelling field */
@@ -2187,6 +2295,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 }
 
 
+static inline void
+iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
+	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
+{
+	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
+		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
+		((uint64_t)md->esp_trailer_len <<
+				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
+
+	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
+		((uint64_t)md->next_proto <<
+				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
+		((uint64_t)(md->len_iv & 0x3) <<
+				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
+		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+				1ULL : 0ULL) <<
+				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
+		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+
+	/**
+	 * TODO: pre-calculate this during session initialization
+	 *
+	 * Calculate the IPsec overhead length needed by the data descriptor
+	 * function when TSO offload is enabled.
+	 */
+	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
+			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+			sizeof(struct rte_udp_hdr) : 0);
+}
+
 static inline void
 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		struct rte_mbuf *m)
@@ -2298,6 +2438,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
 }
 
 
+static struct iavf_ipsec_crypto_pkt_metadata *
+iavf_ipsec_crypto_get_pkt_metdata(const struct iavf_tx_queue *txq,
+		struct rte_mbuf *m)
+{
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
+				struct iavf_ipsec_crypto_pkt_metadata *);
+
+	return NULL;
+}
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -2326,7 +2477,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	for (idx = 0; idx < nb_pkts; idx++) {
 		volatile struct iavf_tx_desc *ddesc;
-		uint16_t nb_desc_ctx;
+		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
+
+		uint16_t nb_desc_ctx, nb_desc_ipsec;
 		uint16_t nb_desc_data, nb_desc_required;
 		uint16_t tlen = 0, ipseclen = 0;
 		uint64_t ddesc_template = 0;
@@ -2336,16 +2489,23 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+		/**
+		 * Get metadata for ipsec crypto from mbuf dynamic fields if
+		 * security offload is specified.
+		 */
+		ipsec_md = iavf_ipsec_crypto_get_pkt_metdata(txq, mb);
+
 		nb_desc_data = mb->nb_segs;
 		nb_desc_ctx = !!(mb->ol_flags &
 			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
+		nb_desc_ipsec = !!(mb->ol_flags & PKT_TX_SEC_OFFLOAD);
 
 		/**
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx;
+		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2396,7 +2556,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txe->mbuf = NULL;
 			}
 
-			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
 			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
 			txe->last_id = desc_idx_last;
@@ -2404,7 +2564,27 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe = txn;
 			}
 
+		if (nb_desc_ipsec) {
+			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
+				(volatile struct iavf_tx_ipsec_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
+			if (txe->mbuf) {
+				rte_pktmbuf_free_seg(txe->mbuf);
+				txe->mbuf = NULL;
+			}
+
+			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
+		}
 
 		mb_seg = mb;
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index d05a525ef9..500ffb2d06 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -25,7 +25,8 @@
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		DEV_TX_OFFLOAD_TCP_TSO |		 \
+		DEV_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
 		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
@@ -47,7 +48,7 @@
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          88
+#define IAVF_MIN_TSO_MSS          256
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
@@ -65,7 +66,8 @@
 		PKT_TX_VLAN_PKT |		 \
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
-		PKT_TX_TCP_SEG)
+		PKT_TX_TCP_SEG |		 \
+		PKT_TX_SEC_OFFLOAD)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
@@ -163,6 +165,24 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_rx_queue_stats {
+	uint64_t reserved;
+	struct iavf_ipsec_crypto_stats ipsec_crypto;
+};
+
 /* Structure associated with each Rx queue. */
 struct iavf_rx_queue {
 	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
@@ -211,6 +231,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 				/* handle flexible descriptor by RXDID */
+	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
 };
 
@@ -245,6 +266,7 @@ struct iavf_tx_queue {
 	uint64_t offloads;
 	uint16_t next_dd;              /* next to set RS, for VPMD */
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
+	uint16_t ipsec_crypto_pkt_md_offset;
 
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
@@ -347,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
 	} flex_ts;
 };
 
+/* Rx Flex Descriptor
+ * RxDID Profile ID 24 Inline IPsec
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: Inline IPsec SAID lower 16-bits
+ * Flex-field 5: Inline IPsec SAID upper 16-bits
+ */
+struct iavf_32b_rx_flex_desc_comms_ipsec {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	__le32 ipsec_said;
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -366,6 +422,7 @@ enum iavf_rxdid {
 	IAVF_RXDID_COMMS_AUX_TCP	= 21,
 	IAVF_RXDID_COMMS_OVS_1		= 22,
 	IAVF_RXDID_COMMS_OVS_2		= 23,
+	IAVF_RXDID_COMMS_IPSEC_CRYPTO	= 24,
 	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
 	IAVF_RXDID_LAST			= 63,
 };
@@ -393,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
 
 enum iavf_rx_flex_desc_status_error_1_bits {
 	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
 	/* [10:6] reserved */
 	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
 	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
@@ -405,6 +466,23 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
+
+enum iavf_rx_flex_desc_ipsec_crypto_status {
+	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
+	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
+	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
+	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
+	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
+	/* Reserved */
+	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
+};
+
+
 
 #define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
 #define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
@@ -672,6 +750,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	case IAVF_TX_DESC_DTYPE_CONTEXT:
 		name = "Tx_context_desc";
 		break;
+	case IAVF_TX_DESC_DTYPE_IPSEC:
+		name = "Tx_IPsec_desc";
+		break;
 	default:
 		name = "unknown_desc";
 		break;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index da4654957a..3be001ff5d 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1774,3 +1774,33 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 
 	return 0;
 }
+
+
+
+int
+iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
+	args.in_args = msg;
+	args.in_args_size = msg_len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 1);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+				"OP_INLINE_IPSEC_CRYPTO");
+		return err;
+	}
+
+	memcpy(resp_msg, args.out_buffer, resp_msg_len);
+
+	return 0;
+}
+
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 36a82e3faa..5eb230f687 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -5,7 +5,7 @@
 cflags += ['-Wno-strict-aliasing']
 
 includes += include_directories('../../common/iavf')
-deps += ['common_iavf']
+deps += ['common_iavf', 'security', 'cryptodev']
 
 sources = files(
         'iavf_ethdev.c',
@@ -15,6 +15,7 @@ sources = files(
         'iavf_fdir.c',
         'iavf_hash.c',
         'iavf_tm.c',
+        'iavf_ipsec_crypto.c',
 )
 
 if arch_subdir == 'x86'
diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
index 3a045040f1..7426eb9be3 100644
--- a/drivers/net/iavf/rte_pmd_iavf.h
+++ b/drivers/net/iavf/rte_pmd_iavf.h
@@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 /**
  * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map
index f3efe756cf..97f0f87311 100644
--- a/drivers/net/iavf/version.map
+++ b/drivers/net/iavf/version.map
@@ -13,4 +13,7 @@ EXPERIMENTAL {
 	rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+	# added in 21.11
+	rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v9 5/7] net/iavf: add xstats support for inline IPsec crypto
  2021-10-18 10:10 ` [dpdk-dev] [PATCH v9 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (3 preceding siblings ...)
  2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-18 10:10   ` Radu Nicolau
  2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
  2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-18 10:10 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add per-queue counters for maintaining statistics for inline IPsec
crypto offload. These can be retrieved through
rte_security_session_stats_get(), with more detailed error counters
available through the rte_ethdev xstats API.
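
For reference, a minimal sketch of how an application could read these
counters, assuming port_id refers to an initialised iavf VF port
(illustrative only, not part of this patch):

	#include <stdio.h>
	#include <string.h>
	#include <inttypes.h>
	#include <rte_ethdev.h>

	/* Print only the inline IPsec crypto xstats exposed by the PMD. */
	static void
	dump_inline_ipsec_xstats(uint16_t port_id)
	{
		struct rte_eth_xstat_name names[64];
		struct rte_eth_xstat values[64];
		int i, n;

		n = rte_eth_xstats_get_names(port_id, names, RTE_DIM(names));
		if (n <= 0 || n > (int)RTE_DIM(values) ||
				rte_eth_xstats_get(port_id, values, n) != n)
			return;

		for (i = 0; i < n; i++)
			if (strncmp(names[i].name, "inline_ipsec_crypto_", 20) == 0)
				printf("%s: %" PRIu64 "\n",
					names[i].name, values[i].value);
	}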

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h        | 21 ++++++++-
 drivers/net/iavf/iavf_ethdev.c | 84 ++++++++++++++++++++++++++++------
 drivers/net/iavf/iavf_rxtx.h   | 12 -----
 3 files changed, 89 insertions(+), 28 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index e98c42ba08..90a7344bd5 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -96,6 +96,25 @@ struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
 
+
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_eth_xstats {
+	struct virtchnl_eth_stats eth_stats;
+	struct iavf_ipsec_crypto_stats ips_stats;
+};
+
 /* Structure that defines a VSI, associated with a adapter. */
 struct iavf_vsi {
 	struct iavf_adapter *adapter; /* Backreference to associated adapter */
@@ -105,7 +124,7 @@ struct iavf_vsi {
 	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
 	uint16_t base_vector;
 	uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
-	struct virtchnl_eth_stats eth_stats_offset;
+	struct iavf_eth_xstats eth_stats_offset;
 };
 
 struct rte_flow;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index f6e6ff4745..41d859ed57 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -90,6 +90,7 @@ static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);
 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n);
 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -145,21 +146,37 @@ struct rte_iavf_xstats_name_off {
 	unsigned int offset;
 };
 
+#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
-	{"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
-	{"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
-	{"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
-	{"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
-	{"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
+	{"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
+	{"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
+	{"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
+	{"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
+	{"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
 	{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
 		rx_unknown_protocol)},
-	{"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
-	{"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
-	{"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
-	{"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
-	{"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
-	{"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
+	{"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
+	{"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
+	{"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
+	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
+	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
+	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+
+	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
+	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
+	{"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
+	{"inline_ipsec_crypto_ierrors_sad_lookup",
+			_OFF_OF(ips_stats.ierrors.sad_miss)},
+	{"inline_ipsec_crypto_ierrors_not_processed",
+			_OFF_OF(ips_stats.ierrors.not_processed)},
+	{"inline_ipsec_crypto_ierrors_icv_fail",
+			_OFF_OF(ips_stats.ierrors.icv_check)},
+	{"inline_ipsec_crypto_ierrors_length",
+			_OFF_OF(ips_stats.ierrors.ipsec_length)},
+	{"inline_ipsec_crypto_ierrors_misc",
+			_OFF_OF(ips_stats.ierrors.misc)},
 };
+#undef _OFF_OF
 
 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
 		sizeof(rte_iavf_stats_strings[0]))
@@ -177,7 +194,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.stats_reset                = iavf_dev_stats_reset,
 	.xstats_get                 = iavf_dev_xstats_get,
 	.xstats_get_names           = iavf_dev_xstats_get_names,
-	.xstats_reset               = iavf_dev_stats_reset,
+	.xstats_reset               = iavf_dev_xstats_reset,
 	.promiscuous_enable         = iavf_dev_promiscuous_enable,
 	.promiscuous_disable        = iavf_dev_promiscuous_disable,
 	.allmulticast_enable        = iavf_dev_allmulticast_enable,
@@ -1559,7 +1576,7 @@ iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
 static void
 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
 {
-	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
 
 	iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
 	iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
@@ -1621,7 +1638,18 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev)
 		return ret;
 
 	/* set stats offset base on current values */
-	vsi->eth_stats_offset = *pstats;
+	vsi->eth_stats_offset.eth_stats = *pstats;
+
+	return 0;
+}
+
+static int
+iavf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_dev_stats_reset(dev);
+	memset(&vf->vsi.eth_stats_offset, 0, sizeof(struct iavf_eth_xstats));
 
 	return 0;
 }
@@ -1641,6 +1669,27 @@ static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return IAVF_NB_XSTATS;
 }
 
+static void
+iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
+		struct iavf_ipsec_crypto_stats *ips)
+{
+	uint16_t idx;
+	for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
+		struct iavf_rx_queue *rxq;
+		struct iavf_ipsec_crypto_stats *stats;
+		rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
+		stats = &rxq->stats.ipsec_crypto;
+		ips->icount += stats->icount;
+		ips->ibytes += stats->ibytes;
+		ips->ierrors.count += stats->ierrors.count;
+		ips->ierrors.sad_miss += stats->ierrors.sad_miss;
+		ips->ierrors.not_processed += stats->ierrors.not_processed;
+		ips->ierrors.icv_check += stats->ierrors.icv_check;
+		ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
+		ips->ierrors.misc += stats->ierrors.misc;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1651,6 +1700,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_vsi *vsi = &vf->vsi;
 	struct virtchnl_eth_stats *pstats = NULL;
+	struct iavf_eth_xstats iavf_xtats = {0};
 
 	if (n < IAVF_NB_XSTATS)
 		return IAVF_NB_XSTATS;
@@ -1663,11 +1713,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 		return 0;
 
 	iavf_update_stats(vsi, pstats);
+	iavf_xtats.eth_stats = *pstats;
+
+	if (iavf_ipsec_crypto_supported(adapter))
+		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
-		xstats[i].value = *(uint64_t *)(((char *)pstats) +
+		xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
 			rte_iavf_stats_strings[i].offset);
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 500ffb2d06..5e39d2bc96 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -165,18 +165,6 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
-struct iavf_ipsec_crypto_stats {
-	uint64_t icount;
-	uint64_t ibytes;
-	struct {
-		uint64_t count;
-		uint64_t sad_miss;
-		uint64_t not_processed;
-		uint64_t icv_check;
-		uint64_t ipsec_length;
-		uint64_t misc;
-	} ierrors;
-};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v9 6/7] net/iavf: add watchdog for VFLR
  2021-10-18 10:10 ` [dpdk-dev] [PATCH v9 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (4 preceding siblings ...)
  2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-10-18 10:10   ` Radu Nicolau
  2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-18 10:10 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add a watchdog to the iAVF PMD that monitors the VFLR register. If
the device is not already in reset and a VF reset in progress is
detected, notify the user through a callback and put the device into
reset state. If the device is already in reset, poll for completion
of the reset.

The watchdog is disabled by default; to enable it, set
IAVF_DEV_WATCHDOG_PERIOD to a non-zero value (microseconds).
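
For reference, an application that wants to act on the VFLR event
raised by the watchdog can register a handler for
RTE_ETH_EVENT_INTR_RESET; a minimal sketch (the handler name and its
body are illustrative only, not part of this patch):

    #include <stdio.h>
    #include <rte_common.h>
    #include <rte_ethdev.h>

    /* Called by the ethdev layer when the watchdog signals a VF reset. */
    static int
    vf_reset_cb(uint16_t port_id, enum rte_eth_event_type type,
                void *cb_arg, void *ret_param)
    {
        RTE_SET_USED(cb_arg);
        RTE_SET_USED(ret_param);
        if (type == RTE_ETH_EVENT_INTR_RESET)
            printf("port %u: VF reset detected, recovery needed\n",
                   port_id);
        return 0;
    }

    /* registration, e.g. after rte_eth_dev_configure():
     * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
     *                               vf_reset_cb, NULL);
     */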

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h        |  5 ++
 drivers/net/iavf/iavf_ethdev.c | 94 ++++++++++++++++++++++++++++++++++
 2 files changed, 99 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 90a7344bd5..f06979b4da 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -31,6 +31,8 @@
 
 #define IAVF_NUM_MACADDR_MAX      64
 
+#define IAVF_DEV_WATCHDOG_PERIOD     0
+
 #define IAVF_DEFAULT_RX_PTHRESH      8
 #define IAVF_DEFAULT_RX_HTHRESH      8
 #define IAVF_DEFAULT_RX_WTHRESH      0
@@ -216,6 +218,9 @@ struct iavf_info {
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
+	/** iAVF watchdog enable */
+	bool watchdog_enabled;
+
 	/* Event from pf */
 	bool dev_closed;
 	bool link_up;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 41d859ed57..4e9f592cdc 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -25,6 +25,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_alarm.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
@@ -240,6 +241,91 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+__rte_unused
+static int
+iavf_vfr_inprogress(struct iavf_hw *hw)
+{
+	int inprogress = 0;
+
+	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
+		VIRTCHNL_VFR_INPROGRESS)
+		inprogress = 1;
+
+	if (inprogress)
+		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
+
+	return inprogress;
+}
+
+__rte_unused
+static void
+iavf_dev_watchdog(void *cb_arg)
+{
+	struct iavf_adapter *adapter = cb_arg;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	int vfr_inprogress = 0, rc = 0;
+
+	/* check if watchdog has been disabled since last call */
+	if (!adapter->vf.watchdog_enabled)
+		return;
+
+	/* If in reset then poll vfr_inprogress register for completion */
+	if (adapter->vf.vf_reset) {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (!vfr_inprogress) {
+			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+				adapter->vf.eth_dev->data->name);
+			adapter->vf.vf_reset = false;
+		}
+	/* If not in reset then poll vfr_inprogress register for VFLR event */
+	} else {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (vfr_inprogress) {
+			PMD_DRV_LOG(INFO,
+				"VF \"%s\" reset event detected by watchdog",
+				adapter->vf.eth_dev->data->name);
+
+			/* enter reset state with VFLR event */
+			adapter->vf.vf_reset = true;
+
+			rte_eth_dev_callback_process(adapter->vf.eth_dev,
+				RTE_ETH_EVENT_INTR_RESET, NULL);
+		}
+	}
+
+	/* re-alarm watchdog */
+	rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, cb_arg);
+
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed \"%s\" to reset device watchdog alarm",
+			adapter->vf.eth_dev->data->name);
+}
+
+static void
+iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Enabling device watchdog");
+	adapter->vf.watchdog_enabled = true;
+	if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, (void *)adapter))
+		PMD_DRV_LOG(ERR, "Failed to enabled device watchdog");
+#endif
+}
+
+static void
+iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Disabling device watchdog");
+	adapter->vf.watchdog_enabled = false;
+#endif
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
@@ -2496,6 +2582,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	iavf_default_rss_disable(adapter);
 
+
+	/* Start device watchdog */
+	iavf_dev_watchdog_enable(adapter);
+
+
 	return 0;
 
 flow_init_err:
@@ -2579,6 +2670,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
 		vf->vf_reset = false;
 
+	/* disable watchdog */
+	iavf_dev_watchdog_disable(adapter);
+
 	return ret;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v9 7/7] net/iavf: update doc with inline crypto support
  2021-10-18 10:10 ` [dpdk-dev] [PATCH v9 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (5 preceding siblings ...)
  2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
@ 2021-10-18 10:10   ` Radu Nicolau
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-18 10:10 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Haiyue Wang
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Update the PMD doc, feature matrix and release notes with the
new inline crypto feature.

Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 doc/guides/nics/features/iavf.ini      |  2 ++
 doc/guides/nics/intel_vf.rst           | 10 ++++++++++
 doc/guides/rel_notes/release_21_11.rst |  1 +
 3 files changed, 13 insertions(+)

diff --git a/doc/guides/nics/features/iavf.ini b/doc/guides/nics/features/iavf.ini
index d00ca934c3..78f649c25f 100644
--- a/doc/guides/nics/features/iavf.ini
+++ b/doc/guides/nics/features/iavf.ini
@@ -28,6 +28,7 @@ L4 checksum offload  = P
 Packet type parsing  = Y
 Rx descriptor status = Y
 Tx descriptor status = Y
+Inline crypto        = Y
 Basic stats          = Y
 Multiprocess aware   = Y
 FreeBSD              = Y
@@ -64,3 +65,4 @@ mark                 = Y
 passthru             = Y
 queue                = Y
 rss                  = Y
+security             = Y
diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index 2efdd1a41b..038e7c02b6 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -633,3 +633,13 @@ Windows Support
 
 *   To load NetUIO driver, follow the steps mentioned in `dpdk-kmods repository
     <https://git.dpdk.org/dpdk-kmods/tree/windows/netuio/README.rst>`_.
+
+
+Inline IPsec Support
+--------------------
+
+*   IAVF PMD supports inline crypto processing depending on the underlying
+    hardware crypto capabilities. IPsec Security Gateway Sample Application
+    supports inline IPsec processing for IAVF PMD. For more details see the
+    IPsec Security Gateway Sample Application and Security library
+    documentation.
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index ec2a788789..9e0d2122cc 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -112,6 +112,7 @@ New Features
 
   * Added Intel iavf support on Windows.
   * Added IPv4 and L4 (TCP/UDP/SCTP) checksum hash support in RSS flow.
+  * Added Intel iavf inline crypto support.
 
 * **Updated Intel ice driver.**
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v10 0/7] iavf: add iAVF IPsec inline crypto support
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (11 preceding siblings ...)
  2021-10-18 10:10 ` [dpdk-dev] [PATCH v9 0/7] iavf: add iAVF IPsec " Radu Nicolau
@ 2021-10-19  9:23 ` Radu Nicolau
  2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 1/7] common/iavf: " Radu Nicolau
                     ` (6 more replies)
  2021-10-26 10:38 ` [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec " Radu Nicolau
                   ` (3 subsequent siblings)
  16 siblings, 7 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-19  9:23 UTC (permalink / raw)
  Cc: dev, declan.doherty, abhijit.sinha, jingjing.wu, qi.z.zhang,
	beilei.xing, bruce.richardson, konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel mode over IPv4 and IPv6, as well as support for offloading
ESP over UDP, in conjunction with TSO for UDP and TCP flows.
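
As a usage hint (not part of the patch set), an application can probe
whether the VF exposes the inline crypto capability before creating
rte_security sessions; a minimal sketch:

    #include <rte_ethdev.h>
    #include <rte_security.h>

    /* Return 1 if the port exposes an rte_security context with an
     * inline-crypto IPsec capability, 0 otherwise.
     */
    static int
    port_has_inline_crypto(uint16_t port_id)
    {
        struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
        const struct rte_security_capability *cap;

        if (ctx == NULL)
            return 0;

        for (cap = rte_security_capabilities_get(ctx);
             cap != NULL && cap->action != RTE_SECURITY_ACTION_TYPE_NONE;
             cap++) {
            if (cap->action == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
                cap->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
                return 1;
        }
        return 0;
    }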

Radu Nicolau (7):
  common/iavf: add iAVF IPsec inline crypto support
  net/iavf: rework tx path
  net/iavf: add support for asynchronous virt channel messages
  net/iavf: add iAVF IPsec inline crypto support
  net/iavf: add xstats support for inline IPsec crypto
  net/iavf: add watchdog for VFLR
  net/iavf: update doc with inline crypto support

 doc/guides/nics/features/iavf.ini             |    2 +
 doc/guides/nics/intel_vf.rst                  |   10 +
 doc/guides/rel_notes/release_21_11.rst        |    1 +
 drivers/common/iavf/iavf_type.h               |    1 +
 drivers/common/iavf/virtchnl.h                |   17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
 drivers/net/iavf/iavf.h                       |   52 +-
 drivers/net/iavf/iavf_ethdev.c                |  219 +-
 drivers/net/iavf/iavf_generic_flow.c          |   15 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1894 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  710 ++++--
 drivers/net/iavf/iavf_rxtx.h                  |  198 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  167 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 20 files changed, 4090 insertions(+), 311 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

-- 

v2: small updates and fixes in the flow related section
v3: split the huge patch and address feedback
v4: small changes due to dependencies changes
v5: updated the watchdog patch
v6: rebased and updated the common section
v7: fixed TSO issue and disabled watchdog by default
v8: rebased to next-net-intel and added doc updates
v9: fixed IV len for AEAD and GMAC
v10: removed blank lines at EOF

2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v10 1/7] common/iavf: add iAVF IPsec inline crypto support
  2021-10-19  9:23 ` [dpdk-dev] [PATCH v10 0/7] iavf: add iAVF IPsec " Radu Nicolau
@ 2021-10-19  9:23   ` Radu Nicolau
  2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 2/7] net/iavf: rework tx path Radu Nicolau
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-19  9:23 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/common/iavf/iavf_type.h             |   1 +
 drivers/common/iavf/virtchnl.h              |  17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h | 553 ++++++++++++++++++++
 3 files changed, 569 insertions(+), 2 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 73dfb47e70..51267ca3b3 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -723,6 +723,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 067f715945..269578f7c0 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -38,6 +38,8 @@
  * value in current and future projects
  */
 
+#include "virtchnl_inline_ipsec.h"
+
 /* Error Codes */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
@@ -133,7 +135,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, and 37 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
 	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
 	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
@@ -225,6 +228,8 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 		return "VIRTCHNL_OP_DCF_CMD_DESC";
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
@@ -385,7 +390,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
-	/* BIT(8) is reserved */
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
@@ -2291,6 +2296,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..1e9134501e
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all valid, if certain field is invalid, set 1 for all bits */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* vf record the capability of crypto from the virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF pass virtchnl_ipsec_cap to PF
+ * and PF return capability of ipsec from virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF send this SA configuration to PF using virtchnl;
+ * PF create SA as configuration and PF driver will return
+ * an unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-reply window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF send configuration of index of SA to PF
+ * PF will update SA according to configuration
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF send configuration of index of SA to PF
+ * PF will destroy SA according to configuration
+ * flag bitmap indicate all SA or just selected SA will
+ * be destroyed
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF send this SA configuration to PF using virtchnl;
+ * PF read SA and will return configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v10 2/7] net/iavf: rework tx path
  2021-10-19  9:23 ` [dpdk-dev] [PATCH v10 0/7] iavf: add iAVF IPsec " Radu Nicolau
  2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 1/7] common/iavf: " Radu Nicolau
@ 2021-10-19  9:23   ` Radu Nicolau
  2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-19  9:23 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Rework the TX path and TX descriptor usage in order to
allow for better use of offload flags and to facilitate enabling of
the inline crypto offload feature.
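
For context, the reworked path consumes the standard mbuf TX offload
metadata; a minimal sketch of what an application sets for a plain
IPv4/TCP TSO packet (no tunnelling, values assume a standard Ethernet
header; helper name is illustrative, not part of this patch):

    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_tcp.h>
    #include <rte_mbuf.h>

    /* Fill TX offload metadata for a TSO + checksum offload packet. */
    static void
    set_tso_offloads(struct rte_mbuf *m, uint16_t mss)
    {
        m->l2_len = sizeof(struct rte_ether_hdr);
        m->l3_len = sizeof(struct rte_ipv4_hdr);
        m->l4_len = sizeof(struct rte_tcp_hdr);
        m->tso_segsz = mss;
        m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
                       PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG;
    }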

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c         | 538 ++++++++++++++++-----------
 drivers/net/iavf/iavf_rxtx.h         | 117 +++++-
 drivers/net/iavf/iavf_rxtx_vec_sse.c |  10 +-
 3 files changed, 431 insertions(+), 234 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 88bbd40c10..11b7fea36f 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1054,27 +1054,31 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 
 static inline void
 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
-			  volatile union iavf_rx_flex_desc *rxdp,
-			  uint8_t rx_flags)
+			  volatile union iavf_rx_flex_desc *rxdp)
 {
-	uint16_t vlan_tci = 0;
-
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
-	    rte_le_to_cpu_64(rxdp->wb.status_error0) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+		(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+		mb->vlan_tci =
+			rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	} else {
+		mb->vlan_tci = 0;
+	}
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
-	    rte_le_to_cpu_16(rxdp->wb.status_error1) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
-#endif
-
-	if (vlan_tci) {
-		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		mb->vlan_tci = vlan_tci;
+	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
+		mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+				PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+		mb->vlan_tci_outer = mb->vlan_tci;
+		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
+	} else {
+		mb->vlan_tci_outer = 0;
 	}
+#endif
 }
 
 /* Translate the rx descriptor status and error fields to pkt flags */
@@ -1394,7 +1398,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->ol_flags = 0;
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1536,7 +1540,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = 0;
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1774,7 +1778,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
-			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
+			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2068,190 +2072,302 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 	return 0;
 }
 
-/* Check if the context descriptor is needed for TX offloading */
+
+
+static inline void
+iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
+{
+	uint64_t cmd = 0;
+
+	/* TSO enabled */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;
+
+	/* Time Sync - Currently not supported */
+
+	/* Outer L2 TAG 2 Insertion - Currently not supported */
+	/* Inner L2 TAG 2 Insertion - Currently not supported */
+
+	*field |= cmd;
+}
+
+static inline void
+iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
+		const struct rte_mbuf *m)
+{
+	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
+	uint64_t eip_len = 0;
+	uint64_t eip_noinc = 0;
+	/* Default - IP_ID is increment in each segment of LSO */
+
+	switch (m->ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6 |
+			PKT_TX_OUTER_IP_CKSUM)) {
+	case PKT_TX_OUTER_IPV4:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV6:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	}
+
+	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
+		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
+		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
+}
+
 static inline uint16_t
-iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
+iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
+	struct rte_mbuf *m)
 {
-	if (flags & PKT_TX_TCP_SEG)
-		return 1;
-	if (flags & PKT_TX_VLAN_PKT &&
-	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		return 1;
-	return 0;
+	uint64_t segmentation_field = 0;
+	uint64_t total_length = 0;
+
+	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		total_length -= m->outer_l3_len;
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+	if (!m->l4_len || !m->tso_segsz)
+		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
+			 m->l4_len, m->tso_segsz);
+	if (m->tso_segsz < 88)
+		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
+			m->tso_segsz, 88);
+#endif
+	segmentation_field =
+		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
+				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
+		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
+				IAVF_TXD_CTX_QW1_MSS_MASK);
+
+	*field |= segmentation_field;
+
+	return total_length;
 }
 
+
+struct iavf_tx_context_desc_qws {
+	__le64 qw0;
+	__le64 qw1;
+};
+
 static inline void
-iavf_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union iavf_tx_offload tx_offload)
+iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
+	struct rte_mbuf *m, uint16_t *tlen)
 {
+	volatile struct iavf_tx_context_desc_qws *desc_qws =
+			(volatile struct iavf_tx_context_desc_qws *)desc;
+	/* fill descriptor type field */
+	desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
+
+	/* fill command field */
+	iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m);
+
+	/* fill segmentation field */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
+				m);
+	}
+
+	/* fill tunnelling field */
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
+	else
+		desc_qws->qw0 = 0;
+
+	desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
+	desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);
+}
+
+
+static inline void
+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
+		struct rte_mbuf *m)
+{
+	uint64_t command = 0;
+	uint64_t offset = 0;
+	uint64_t l2tag1 = 0;
+
+	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
+
+	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
+
+	/* Descriptor based VLAN insertion */
+	if (m->ol_flags & PKT_TX_VLAN_PKT) {
+		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
+		l2tag1 |= m->vlan_tci;
+	}
+
 	/* Set MACLEN */
-	*td_offset |= (tx_offload.l2_len >> 1) <<
-		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & PKT_TX_IP_CKSUM) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV4) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV6) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	}
-
-	if (ol_flags & PKT_TX_TCP_SEG) {
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
+	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L3 checksum offloading inner */
+	if (m->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV4) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV6) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	}
+
+	if (m->ol_flags & PKT_TX_TCP_SEG) {
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (m->l4_len >> 2) <<
 			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		return;
 	}
 
 	/* Enable L4 checksum offloads */
-	switch (ol_flags & PKT_TX_L4_MASK) {
+	switch (m->ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_TCP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_SCTP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_UDP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	}
+
+	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
+		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
+		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
+		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
+		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
-/* set TSO context descriptor
- * support IP -> L4 and IP -> IP -> L4
- */
-static inline uint64_t
-iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
+static inline void
+iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t value)
 {
-	uint64_t ctx_desc = 0;
-	uint32_t cd_cmd, hdr_len, cd_tso_len;
-
-	if (!tx_offload.l4_len) {
-		PMD_TX_LOG(DEBUG, "L4 length set to 0");
-		return ctx_desc;
+	*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 	}
 
-	hdr_len = tx_offload.l2_len +
-		  tx_offload.l3_len +
-		  tx_offload.l4_len;
+static inline void
+iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
+	struct rte_mbuf *m, uint64_t desc_template,
+	uint16_t tlen, uint16_t ipseclen)
+{
+	uint32_t hdrlen = m->l2_len;
+	uint32_t bufsz = 0;
 
-	cd_cmd = IAVF_TX_CTX_DESC_TSO;
-	cd_tso_len = mbuf->pkt_len - hdr_len;
-	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
-		     ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-		     ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+	/* fill data descriptor qw1 from template */
+	desc->cmd_type_offset_bsz = desc_template;
 
-	return ctx_desc;
-}
+	/* set data buffer address */
+	desc->buffer_addr = rte_mbuf_data_iova(m);
 
-/* Construct the tx flags */
-static inline uint64_t
-iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
-	       uint32_t td_tag)
-{
-	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
-				((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
-				((uint64_t)td_offset <<
-				 IAVF_TXD_QW1_OFFSET_SHIFT) |
-				((uint64_t)size  <<
-				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
-				((uint64_t)td_tag  <<
-				 IAVF_TXD_QW1_L2TAG1_SHIFT));
+	/* calculate data buffer size less set header lengths */
+	if ((m->ol_flags & PKT_TX_TUNNEL_MASK) &&
+			(m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))) {
+		hdrlen += m->outer_l3_len;
+		if (m->ol_flags & PKT_TX_L4_MASK)
+			hdrlen += m->l3_len + m->l4_len;
+		else
+			hdrlen += m->l3_len;
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			hdrlen += ipseclen;
+		bufsz = hdrlen + tlen;
+	} else {
+		bufsz = m->data_len;
+	}
+
+	/* set data buffer size */
+	desc->cmd_type_offset_bsz |=
+		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
+
+	desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
+	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
 }
 
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	volatile struct iavf_tx_desc *txd;
-	volatile struct iavf_tx_desc *txr;
-	struct iavf_tx_queue *txq;
-	struct iavf_tx_entry *sw_ring;
+	struct iavf_tx_queue *txq = tx_queue;
+	volatile struct iavf_tx_desc *txr = txq->tx_ring;
+	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
-	struct rte_mbuf *tx_pkt;
-	struct rte_mbuf *m_seg;
-	uint16_t tx_id;
-	uint16_t nb_tx;
-	uint32_t td_cmd;
-	uint32_t td_offset;
-	uint32_t td_tag;
-	uint64_t ol_flags;
-	uint16_t nb_used;
-	uint16_t nb_ctx;
-	uint16_t tx_last;
-	uint16_t slen;
-	uint64_t buf_dma_addr;
-	uint16_t cd_l2tag2 = 0;
-	union iavf_tx_offload tx_offload = {0};
-
-	txq = tx_queue;
-	sw_ring = txq->sw_ring;
-	txr = txq->tx_ring;
-	tx_id = txq->tx_tail;
-	txe = &sw_ring[tx_id];
+	struct rte_mbuf *mb, *mb_seg;
+	uint16_t desc_idx, desc_idx_last;
+	uint16_t idx;
+
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_free < txq->free_thresh)
-		(void)iavf_xmit_cleanup(txq);
+		iavf_xmit_cleanup(txq);
+
+	desc_idx = txq->tx_tail;
+	txe = &txe_ring[desc_idx];
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+		iavf_dump_tx_entry_ring(txq);
+		iavf_dump_tx_desc_ring(txq);
+#endif
+
 
-	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-		td_cmd = 0;
-		td_tag = 0;
-		td_offset = 0;
+	for (idx = 0; idx < nb_pkts; idx++) {
+		volatile struct iavf_tx_desc *ddesc;
+		uint16_t nb_desc_ctx;
+		uint16_t nb_desc_data, nb_desc_required;
+		uint16_t tlen = 0, ipseclen = 0;
+		uint64_t ddesc_template = 0;
+		uint64_t ddesc_cmd = 0;
+
+		mb = tx_pkts[idx];
 
-		tx_pkt = *tx_pkts++;
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
-		ol_flags = tx_pkt->ol_flags;
-		tx_offload.l2_len = tx_pkt->l2_len;
-		tx_offload.l3_len = tx_pkt->l3_len;
-		tx_offload.l4_len = tx_pkt->l4_len;
-		tx_offload.tso_segsz = tx_pkt->tso_segsz;
-		/* Calculate the number of context descriptors needed. */
-		nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
+		nb_desc_data = mb->nb_segs;
+		nb_desc_ctx = !!(mb->ol_flags &
+			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
 
-		/* The number of descriptors that must be allocated for
+		/**
+		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
-		 * packet plus 1 context descriptor if needed.
+		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
-		tx_last = (uint16_t)(tx_id + nb_used - 1);
+		nb_desc_required = nb_desc_data + nb_desc_ctx;
+
+		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
-		/* Circular ring */
-		if (tx_last >= txq->nb_tx_desc)
-			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+		/* wrap descriptor ring */
+		if (desc_idx_last >= txq->nb_tx_desc)
+			desc_idx_last =
+				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
 
-		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
-			   " tx_first=%u tx_last=%u",
-			   txq->port_id, txq->queue_id, tx_id, tx_last);
+		PMD_TX_LOG(DEBUG,
+			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
+			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
 
-		if (nb_used > txq->nb_free) {
+		if (nb_desc_required > txq->nb_free) {
 			if (iavf_xmit_cleanup(txq)) {
-				if (nb_tx == 0)
+				if (idx == 0)
 					return 0;
 				goto end_of_tx;
 			}
-			if (unlikely(nb_used > txq->rs_thresh)) {
-				while (nb_used > txq->nb_free) {
+			if (unlikely(nb_desc_required > txq->rs_thresh)) {
+				while (nb_desc_required > txq->nb_free) {
 					if (iavf_xmit_cleanup(txq)) {
-						if (nb_tx == 0)
+						if (idx == 0)
 							return 0;
 						goto end_of_tx;
 					}
@@ -2259,122 +2375,94 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 		}
 
-		/* Descriptor based VLAN insertion */
-		if (ol_flags & PKT_TX_VLAN_PKT &&
-		    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
-			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
-			td_tag = tx_pkt->vlan_tci;
-		}
-
-		/* According to datasheet, the bit2 is reserved and must be
-		 * set to 1.
-		 */
-		td_cmd |= 0x04;
-
-		/* Enable checksum offloading */
-		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
-			iavf_txd_enable_checksum(ol_flags, &td_cmd,
-						&td_offset, tx_offload);
+		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
 
-		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
-			uint64_t cd_type_cmd_tso_mss =
-				IAVF_TX_DESC_DTYPE_CONTEXT;
-			volatile struct iavf_tx_context_desc *ctx_txd =
+		if (nb_desc_ctx) {
+			volatile struct iavf_tx_context_desc *ctx_desc =
 				(volatile struct iavf_tx_context_desc *)
-							&txr[tx_id];
+					&txr[desc_idx];
 
 			/* clear QW0 or the previous writeback value
 			 * may impact next write
 			 */
-			*(volatile uint64_t *)ctx_txd = 0;
+			*(volatile uint64_t *)ctx_desc = 0;
 
-			txn = &sw_ring[txe->next_id];
+			txn = &txe_ring[txe->next_id];
 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
 			if (txe->mbuf) {
 				rte_pktmbuf_free_seg(txe->mbuf);
 				txe->mbuf = NULL;
 			}
 
-			/* TSO enabled */
-			if (ol_flags & PKT_TX_TCP_SEG)
-				cd_type_cmd_tso_mss |=
-					iavf_set_tso_ctx(tx_pkt, tx_offload);
+			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
-			if (ol_flags & PKT_TX_VLAN_PKT &&
-			   txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
-				cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
-					<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-				cd_l2tag2 = tx_pkt->vlan_tci;
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
 			}
 
-			ctx_txd->type_cmd_tso_mss =
-				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
-			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
 
-			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
-		}
 
-		m_seg = tx_pkt;
+		mb_seg = mb;
+
 		do {
-			txd = &txr[tx_id];
-			txn = &sw_ring[txe->next_id];
+			ddesc = (volatile struct iavf_tx_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
 			if (txe->mbuf)
 				rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = m_seg;
-
-			/* Setup TX Descriptor */
-			slen = m_seg->data_len;
-			buf_dma_addr = rte_mbuf_data_iova(m_seg);
-			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
-			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
-								  td_offset,
-								  slen,
-								  td_tag);
-
-			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
+
+			txe->mbuf = mb_seg;
+			iavf_fill_data_desc(ddesc, mb_seg,
+					ddesc_template, tlen, ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->next;
-		} while (m_seg);
+			mb_seg = mb_seg->next;
+		} while (mb_seg);
 
 		/* The last packet data descriptor needs End Of Packet (EOP) */
-		td_cmd |= IAVF_TX_DESC_CMD_EOP;
-		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
-		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
+
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
 
 		if (txq->nb_used >= txq->rs_thresh) {
 			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
 				   "%4u (port=%d queue=%d)",
-				   tx_last, txq->port_id, txq->queue_id);
+				   desc_idx_last, txq->port_id, txq->queue_id);
 
-			td_cmd |= IAVF_TX_DESC_CMD_RS;
+			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
 
 			/* Update txq RS bit counters */
 			txq->nb_used = 0;
 		}
 
-		txd->cmd_type_offset_bsz |=
-			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
-					 IAVF_TXD_QW1_CMD_SHIFT);
-		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
+				IAVF_TXD_DATA_QW1_CMD_SHIFT);
+
+		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
 	}
 
 end_of_tx:
 	rte_wmb();
 
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   txq->port_id, txq->queue_id, tx_id, nb_tx);
+		   txq->port_id, txq->queue_id, desc_idx, idx);
 
-	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
-	txq->tx_tail = tx_id;
+	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
+	txq->tx_tail = desc_idx;
 
-	return nb_tx;
+	return idx;
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f4ae2fd6e1..d05a525ef9 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -405,6 +405,112 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+
+#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
+#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
+#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
+#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
+	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
+	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT		(48)
+#define IAVF_TXD_DATA_QW1_L2TAG1_MASK		\
+	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
+	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
+	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT		(30)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK		\
+	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK		\
+	(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT		(50)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK		\
+	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT		(0)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_external_ip_type {
+	IAVF_TX_CTX_DESC_EIPT_NONE,
+	IAVF_TX_CTX_DESC_EIPT_IPV6,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM			(32)
+#define IAVF_TXD_CTX_QW0_L2TAG2_MASK			(0xFFFFUL)
+
+
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)
+
+/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
+#define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
+
+
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
@@ -555,9 +661,10 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	const volatile struct iavf_tx_desc *tx_desc = desc;
 	enum iavf_tx_desc_dtype_value type;
 
-	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
-		tx_desc->cmd_type_offset_bsz &
-		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+
+	type = (enum iavf_tx_desc_dtype_value)
+		rte_le_to_cpu_64(tx_desc->cmd_type_offset_bsz &
+			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
 	switch (type) {
 	case IAVF_TX_DESC_DTYPE_DATA:
 		name = "Tx_data_desc";
@@ -571,8 +678,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	}
 
 	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
-	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
-	       tx_desc->cmd_type_offset_bsz);
+		txq->queue_id, name, tx_id, tx_desc->buffer_addr,
+		tx_desc->cmd_type_offset_bsz);
 }
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index edb54991e2..2c3bb0b05f 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -363,10 +363,12 @@ static inline void
 flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
 		     const uint32_t *type_table)
 {
-	const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+	const __m128i ptype_mask = _mm_set_epi16(
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
+
 	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
 	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
 	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v10 3/7] net/iavf: add support for asynchronous virt channel messages
  2021-10-19  9:23 ` [dpdk-dev] [PATCH v10 0/7] iavf: add iAVF IPsec " Radu Nicolau
  2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 1/7] common/iavf: " Radu Nicolau
  2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 2/7] net/iavf: rework tx path Radu Nicolau
@ 2021-10-19  9:23   ` Radu Nicolau
  2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-19  9:23 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for asynchronous virtual channel messages, specifically for
inline IPsec messages.
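
For clarity, a minimal self-contained sketch of the intended two-stage
completion, using C11 atomics in place of the driver's rte_atomic32
helpers; the function names below are illustrative only and do not
appear in the driver:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Pending-response counter: 1 for a normal synchronous command,
     * 2 for an asynchronous inline IPsec command (initial adminq
     * acknowledgment plus the later asynchronous reply). */
    static atomic_int pend_cmd_count;

    static void
    issue_cmd(int async)
    {
        atomic_store(&pend_cmd_count, async ? 2 : 1);
    }

    /* Called for each PF message matching the pending opcode; the
     * command path is released only when the count reaches zero. */
    static int
    on_pf_response(void)
    {
        return atomic_fetch_sub(&pend_cmd_count, 1) == 1;
    }

    int
    main(void)
    {
        issue_cmd(1);                                            /* async command */
        printf("complete after ack:   %d\n", on_pf_response()); /* prints 0 */
        printf("complete after reply: %d\n", on_pf_response()); /* prints 1 */
        return 0;
    }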

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h       |  16 ++++
 drivers/net/iavf/iavf_vchnl.c | 138 +++++++++++++++++++++-------------
 2 files changed, 101 insertions(+), 53 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 34bfa9af47..67051f29a8 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -193,6 +193,7 @@ struct iavf_info {
 	uint64_t supported_rxdid;
 	uint8_t *proto_xtr; /* proto xtr type for all queues */
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+	rte_atomic32_t pend_cmd_count;
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
@@ -345,9 +346,24 @@ _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
+	rte_atomic32_set(&vf->pend_cmd_count, 1);
+
 	return !ret;
 }
 
+/* Check there is pending cmd in execution. If none, set new command. */
+static inline int
+_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+	int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+	if (!ret)
+		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+	rte_atomic32_set(&vf->pend_cmd_count, 2);
+
+	return !ret;
+}
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 0f4dd21d44..da4654957a 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -24,8 +24,8 @@
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
-#define MAX_TRY_TIMES 200
-#define ASQ_DELAY_MS  10
+#define MAX_TRY_TIMES 2000
+#define ASQ_DELAY_MS  1
 
 static uint32_t
 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
@@ -143,7 +143,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 }
 
 static int
-iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args,
+	int async)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
@@ -155,8 +156,14 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (vf->vf_reset)
 		return -EIO;
 
-	if (_atomic_set_cmd(vf, args->ops))
-		return -1;
+
+	if (async) {
+		if (_atomic_set_async_response_cmd(vf, args->ops))
+			return -1;
+	} else {
+		if (_atomic_set_cmd(vf, args->ops))
+			return -1;
+	}
 
 	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
@@ -252,9 +259,11 @@ static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
@@ -330,18 +339,40 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
 		case iavf_aqc_opc_send_msg_to_vf:
 			if (msg_opc == VIRTCHNL_OP_EVENT) {
 				iavf_handle_pf_event_msg(dev, info.msg_buf,
-							info.msg_len);
+						info.msg_len);
 			} else {
+				/* check for inline IPsec events */
+				struct inline_ipsec_msg *imsg =
+					(struct inline_ipsec_msg *)info.msg_buf;
+				struct rte_eth_event_ipsec_desc desc;
+				if (msg_opc == VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
+					&& imsg->ipsec_opcode ==
+						INLINE_IPSEC_OP_EVENT) {
+					struct virtchnl_ipsec_event *ev =
+							imsg->ipsec_data.event;
+					desc.subtype =
+						RTE_ETH_EVENT_IPSEC_UNKNOWN;
+					desc.metadata = ev->ipsec_event_data;
+					rte_eth_dev_callback_process(dev,
+							RTE_ETH_EVENT_IPSEC,
+							&desc);
+					return;
+				}
+
 				/* read message and it's expected one */
-				if (msg_opc == vf->pend_cmd)
-					_notify_cmd(vf, msg_ret);
-				else
-					PMD_DRV_LOG(ERR, "command mismatch,"
-						    "expect %u, get %u",
-						    vf->pend_cmd, msg_opc);
+				if (msg_opc == vf->pend_cmd) {
+					rte_atomic32_dec(&vf->pend_cmd_count);
+					if (rte_atomic32_read(
+						&vf->pend_cmd_count) == 0)
+						_notify_cmd(vf, msg_ret);
+				} else {
+					PMD_DRV_LOG(ERR,
+					"command mismatch, expect %u, get %u",
+						vf->pend_cmd, msg_opc);
+				}
 				PMD_DRV_LOG(DEBUG,
-					    "adminq response is received,"
-					    " opcode = %d", msg_opc);
+				"adminq response is received, opcode = %d",
+						msg_opc);
 			}
 			break;
 		default:
@@ -365,7 +396,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_ENABLE_VLAN_STRIPPING");
@@ -386,7 +417,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_DISABLE_VLAN_STRIPPING");
@@ -415,7 +446,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
 		return err;
@@ -468,12 +499,13 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
-		VIRTCHNL_VF_OFFLOAD_QOS;
+		VIRTCHNL_VF_OFFLOAD_QOS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -518,7 +550,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
@@ -562,7 +594,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_strip);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
@@ -602,7 +634,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_insert);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
@@ -645,7 +677,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(vlan_filter);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");
@@ -666,7 +698,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
@@ -697,7 +729,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES");
@@ -725,7 +757,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES");
@@ -758,7 +790,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
@@ -800,7 +832,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
@@ -844,7 +876,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
@@ -890,7 +922,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
@@ -922,7 +954,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
@@ -954,7 +986,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
@@ -1046,7 +1078,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
@@ -1087,7 +1119,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
 
@@ -1128,7 +1160,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
 
@@ -1188,7 +1220,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		args.in_args_size = len;
 		args.out_buffer = vf->aq_resp;
 		args.out_size = IAVF_AQ_BUF_SZ;
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		if (err)
 			PMD_DRV_LOG(ERR, "fail to execute command %s",
 				    add ? "OP_ADD_ETHER_ADDRESS" :
@@ -1215,7 +1247,7 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		*pstats = NULL;
@@ -1250,7 +1282,7 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1290,7 +1322,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
@@ -1317,7 +1349,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
@@ -1344,7 +1376,7 @@ iavf_fdir_add(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
 		return err;
@@ -1404,7 +1436,7 @@ iavf_fdir_del(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
 		return err;
@@ -1451,7 +1483,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
 		return err;
@@ -1492,7 +1524,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of %s",
@@ -1515,7 +1547,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
@@ -1541,7 +1573,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_SET_RSS_HENA");
@@ -1562,7 +1594,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1595,7 +1627,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_TC_MAP");
@@ -1640,7 +1672,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 		i * sizeof(struct virtchnl_ether_addr);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
@@ -1686,11 +1718,11 @@ iavf_request_queues(struct rte_eth_dev *dev, uint16_t num)
 		 * before iavf_read_msg_from_pf.
 		 */
 		rte_intr_disable(&pci_dev->intr_handle);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_intr_enable(&pci_dev->intr_handle);
 	} else {
 		rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
 				  iavf_dev_alarm_handler, dev);
 	}
@@ -1729,7 +1761,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
 		return err;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v10 4/7] net/iavf: add iAVF IPsec inline crypto support
  2021-10-19  9:23 ` [dpdk-dev] [PATCH v10 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (2 preceding siblings ...)
  2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
@ 2021-10-19  9:23   ` Radu Nicolau
  2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-19  9:23 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Ray Kinsella
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload for
ESP over UDP, in conjunction with TSO for UDP and TCP flows.
Implement support for rte_security packet metadata.

Add definitions for the IPsec descriptors and extend the offload
support in the data and context descriptors accordingly.

Add support to the virtual channel mailbox for IPsec Crypto request
operations. IPsec Crypto requests receive an initial acknowledgment
from the physical function driver confirming receipt of the request,
followed by an asynchronous response with the success/failure of the
request, including any response data.

Add enhanced descriptor debugging.

Refactor the scalar Tx burst function to support integration of the
offload.
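
For context, a minimal sketch of how an application is expected to hand
an ESP packet to this offload on the Tx side, assuming a security
session has already been created through the rte_security API and the
device security context obtained with rte_eth_dev_get_sec_ctx(); the
function name and the omission of error handling are illustrative only:

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>
    #include <rte_security.h>

    /* Attach the session metadata so the PMD can fill the IPsec fields
     * of the context/data descriptors, then transmit the packet. */
    static int
    tx_one_esp_packet(uint16_t port_id, uint16_t queue_id,
                      struct rte_security_ctx *sec_ctx,
                      struct rte_security_session *sess,
                      struct rte_mbuf *m)
    {
        if (rte_security_set_pkt_metadata(sec_ctx, sess, m, NULL) != 0)
            return -1;

        m->ol_flags |= PKT_TX_SEC_OFFLOAD;

        return rte_eth_tx_burst(port_id, queue_id, &m, 1) == 1 ? 0 : -1;
    }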

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h                       |   10 +
 drivers/net/iavf/iavf_ethdev.c                |   41 +-
 drivers/net/iavf/iavf_generic_flow.c          |   15 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1894 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  202 +-
 drivers/net/iavf/iavf_rxtx.h                  |   93 +-
 drivers/net/iavf/iavf_vchnl.c                 |   29 +
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 13 files changed, 2815 insertions(+), 21 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 67051f29a8..e98c42ba08 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -221,6 +221,7 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+	struct iavf_parser_list ipsec_crypto_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
@@ -245,6 +246,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_IPV6_FLOW,
 	IAVF_PROTO_XTR_TCP,
 	IAVF_PROTO_XTR_IP_OFFSET,
+	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
 	IAVF_PROTO_XTR_MAX,
 };
 
@@ -256,11 +258,14 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 };
 
+struct iavf_security_ctx;
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
 	struct rte_eth_dev_data *dev_data;
 	struct iavf_info vf;
+	struct iavf_security_ctx *security_ctx;
 
 	bool rx_bulk_alloc_allowed;
 	/* For vector PMD */
@@ -279,6 +284,8 @@ struct iavf_adapter {
 	(&((struct iavf_adapter *)adapter)->vf)
 #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct iavf_adapter *)adapter)->hw)
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+	(((struct iavf_adapter *)adapter)->security_ctx)
 
 /* IAVF_VSI_TO */
 #define IAVF_VSI_TO_HW(vsi) \
@@ -421,5 +428,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			uint16_t size);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 611f1f7722..ac66e383a6 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -30,6 +30,7 @@
 #include "iavf_rxtx.h"
 #include "iavf_generic_flow.h"
 #include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
@@ -71,6 +72,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
 	[IAVF_PROTO_XTR_IP_OFFSET] = {
 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
 		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+		.param = {
+		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+		.ol_flag =
+			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
 };
 
 static int iavf_dev_configure(struct rte_eth_dev *dev);
@@ -939,6 +945,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	/* free the iAVF security device context and all related resources */
+	iavf_security_ctx_destroy(adapter);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -948,7 +957,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
@@ -990,6 +1001,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
 
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+	}
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -1733,6 +1749,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
 		{ "tcp",       IAVF_PROTO_XTR_TCP       },
 		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
 	};
 	uint32_t i;
 
@@ -1741,8 +1758,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 			return xtr_type_map[i].type;
 	}
 
-	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
-		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+			"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
 
 	return -1;
 }
@@ -2390,6 +2407,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		goto flow_init_err;
 	}
 
+	/** Check if the IPsec Crypto offload is supported and create
+	 *  security_ctx if it is.
+	 */
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		/* Initialize security_ctx only for primary process*/
+		ret = iavf_security_ctx_create(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+			return ret;
+		}
+
+		ret = iavf_security_init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to initialized ipsec crypto resources");
+			return ret;
+		}
+	}
+
 	iavf_default_rss_disable(adapter);
 
 	return 0;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index b86d99e57d..8dfa549980 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1635,6 +1635,7 @@ iavf_flow_init(struct iavf_adapter *ad)
 	TAILQ_INIT(&vf->flow_list);
 	TAILQ_INIT(&vf->rss_parser_list);
 	TAILQ_INIT(&vf->dist_parser_list);
+	TAILQ_INIT(&vf->ipsec_crypto_parser_list);
 	rte_spinlock_init(&vf->flow_ops_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
@@ -1709,6 +1710,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
+		list = &vf->ipsec_crypto_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
@@ -2018,6 +2022,13 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 
 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
 				    actions, error);
+	if (*engine)
+		return 0;
+
+	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
+			pattern, actions, error);
+	if (*engine)
+		return 0;
 
 	if (!*engine) {
 		rte_flow_error_set(error, EINVAL,
@@ -2064,6 +2075,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	/* Special case for inline crypto egress flows */
+	if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY)
+		goto free_flow;
+
 	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
 			&engine, iavf_parse_engine_create, error);
 	if (ret < 0) {
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 4794d1fb80..a471c0331f 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -449,6 +449,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 /* engine types. */
 enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_NONE = 0,
+	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
 	IAVF_FLOW_ENGINE_MAX,
@@ -462,6 +463,7 @@ enum iavf_flow_engine_type {
  */
 enum iavf_flow_classification_stage {
 	IAVF_FLOW_STAGE_NONE = 0,
+	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
 	IAVF_FLOW_STAGE_RSS,
 	IAVF_FLOW_STAGE_DISTRIBUTOR,
 	IAVF_FLOW_STAGE_MAX,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
new file mode 100644
index 0000000000..b697e62579
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -0,0 +1,1894 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+#include <rte_security_driver.h>
+#include <rte_security.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_log.h"
+#include "iavf_generic_flow.h"
+
+#include "iavf_ipsec_crypto.h"
+#include "iavf_ipsec_crypto_capabilities.h"
+
+/**
+ * iAVF IPsec Crypto Security Context
+ */
+struct iavf_security_ctx {
+	struct iavf_adapter *adapter;
+	int pkt_md_offset;
+	struct rte_cryptodev_capabilities *crypto_capabilities;
+};
+
+/**
+ * iAVF IPsec Crypto Security Session Parameters
+ */
+struct iavf_security_session {
+	struct iavf_adapter *adapter;
+
+	enum rte_security_ipsec_sa_mode mode;
+	enum rte_security_ipsec_tunnel_type type;
+	enum rte_security_ipsec_sa_direction direction;
+
+	struct {
+		uint32_t spi; /* Security Parameter Index */
+		uint32_t hw_idx; /* SA Index in hardware table */
+	} sa;
+
+	struct {
+		uint8_t enabled :1;
+		union {
+			uint64_t value;
+			struct {
+				uint32_t hi;
+				uint32_t low;
+			};
+		};
+	} esn;
+
+	struct {
+		uint8_t enabled :1;
+	} udp_encap;
+
+	size_t iv_sz;
+	size_t icv_sz;
+	size_t block_sz;
+
+	struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
+};
+/**
+ *  IV Length field in IPsec Tx Desc uses the following encoding:
+ *
+ *  0B - 0
+ *  4B - 1
+ *  8B - 2
+ *  16B - 3
+ *
+ * but we also need the IV Length for TSO to correctly calculate the total
+ * header length so placing it in the upper 6 bits here for easier retrieval.
+ */
+static inline uint8_t
+calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
+{
+	uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
+
+	switch (iv_sz) {
+	case 4:
+		iv_length = IAVF_IPSEC_IV_LEN_DW;
+		break;
+	case 8:
+		iv_length = IAVF_IPSEC_IV_LEN_DDW;
+		break;
+	case 16:
+		iv_length = IAVF_IPSEC_IV_LEN_QDW;
+		break;
+	}
+
+	return (iv_sz << 2) | iv_length;
+}
+
+static unsigned int
+iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
+{
+	return sizeof(struct iavf_security_session);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_capability(struct iavf_security_ctx *iavf_sctx,
+	uint32_t algo, uint32_t type)
+{
+	const struct rte_cryptodev_capabilities *capability;
+	int i = 0;
+
+	capability = &iavf_sctx->crypto_capabilities[i];
+
+	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+			capability->sym.xform_type == type &&
+			capability->sym.cipher.algo == algo)
+			return &capability->sym;
+		/** try next capability */
+		capability = &iavf_crypto_capabilities[i++];
+	}
+
+	return NULL;
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_auth_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
+}
+static const struct rte_cryptodev_symmetric_capability *
+get_aead_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
+}
+
+static uint16_t
+get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->auth.block_size;
+}
+
+static uint8_t
+calc_context_desc_cipherblock_sz(size_t len)
+{
+	switch (len) {
+	case 8:
+		return 0x2;
+	case 16:
+		return 0x3;
+	default:
+		return 0x0;
+	}
+}
+
+static int
+valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
+{
+	if (len < min || len > max)
+		return false;
+
+	if (increment == 0)
+		return true;
+
+	if ((len - min) % increment)
+		return false;
+
+	/* make sure it fits in the key array */
+	if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
+		return false;
+
+	return true;
+}
+
+static int
+valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_auth_xform *auth)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, auth->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(auth->key.length,
+		capability->auth.key_size.min,
+		capability->auth.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_cipher_xform *cipher)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, cipher->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(cipher->key.length,
+		capability->cipher.key_size.min,
+		capability->cipher.key_size.max,
+		capability->cipher.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_aead_xform *aead)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, aead->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(aead->key.length,
+		capability->aead.key_size.min,
+		capability->aead.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
+	struct rte_security_session_conf *conf)
+{
+	/** validate security action/protocol selection */
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
+		PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate IPsec protocol selection */
+	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate selected options */
+	if (conf->ipsec.options.copy_dscp ||
+		conf->ipsec.options.copy_flabel ||
+		conf->ipsec.options.copy_df ||
+		conf->ipsec.options.dec_ttl ||
+		conf->ipsec.options.ecn ||
+		conf->ipsec.options.stats) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+		return -EINVAL;
+	}
+
+	/**
+	 * Validate crypto xforms parameters.
+	 *
+	 * AEAD transforms can be used for either inbound/outbound IPsec SAs,
+	 * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
+	 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
+	 */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_auth_xform(iavf_sctx,
+				&conf->crypto_xform->next->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->next->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_aead_xform *aead, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AEAD;
+
+	switch (aead->algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		cfg->algo_type = VIRTCHNL_AES_CCM; break;
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		cfg->algo_type = VIRTCHNL_AES_GCM; break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
+		break;
+	}
+
+	cfg->key_len = aead->key.length;
+	cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt len */
+	cfg->digest_len = aead->digest_length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, aead->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_cipher_xform *cipher, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_CIPHER;
+
+	switch (cipher->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cfg->algo_type = VIRTCHNL_AES_CBC; break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cfg->algo_type = VIRTCHNL_3DES_CBC; break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cfg->algo_type = VIRTCHNL_AES_CTR;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid cipher parameters");
+		break;
+	}
+
+	cfg->key_len = cipher->key.length;
+	cfg->iv_len = cipher->iv.length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_auth_xform *auth, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AUTH;
+
+	switch (auth->algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+		cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		cfg->algo_type = VIRTCHNL_AES_CMAC; break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		cfg->algo_type = VIRTCHNL_AES_GMAC;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid auth parameters");
+		break;
+	}
+
+	cfg->key_len = auth->key.length;
+	/* special case for RTE_CRYPTO_AUTH_AES_GMAC */
+	if (auth->algo == RTE_CRYPTO_AUTH_AES_GMAC)
+		cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt */
+	else
+		cfg->iv_len = auth->iv.length;
+	cfg->digest_len = auth->digest_length;
+
+	memcpy(cfg->key_data, auth->key.data, cfg->key_len);
+}
+
+/**
+ * Send SA add virtual channel request to Inline IPsec driver.
+ *
+ * Inline IPsec driver expects SPI and destination IP address to be in host
+ * order, but DPDK APIs are network order, therefore we need to do a htonl
+ * conversion of these parameters.
+ */
+static uint32_t
+iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
+	struct rte_security_session_conf *conf)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	struct virtchnl_ipsec_sa_cfg *sa_cfg;
+	size_t request_len, response_len;
+
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg);
+
+	request = rte_malloc("iavf-sad-add-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
+	response = rte_malloc("iavf-sad-add-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set SA configuration params */
+	sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
+
+	sa_cfg->spi = conf->ipsec.spi;
+	sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
+	sa_cfg->virtchnl_direction =
+		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+			VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
+
+	if (conf->ipsec.options.esn) {
+		sa_cfg->esn_enabled = 1;
+		sa_cfg->esn_hi = conf->ipsec.esn.hi;
+		sa_cfg->esn_low = conf->ipsec.esn.low;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sa_cfg->udp_encap_enabled = 1;
+
+	/* Set outer IP params */
+	if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
+
+		*((uint32_t *)sa_cfg->dst_addr)	=
+			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
+	} else {
+		uint32_t *v6_dst_addr =
+			conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
+
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
+
+		((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
+		((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
+		((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
+		((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
+	}
+
+	/* set crypto params */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->aead, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->cipher, conf->ipsec.salt);
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->auth, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->auth, conf->ipsec.salt);
+		if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
+			sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->cipher, conf->ipsec.salt);
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sa_cfg_resp->sa_handle;
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static void
+set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
+	struct iavf_security_session *sess)
+{
+	template->sa_idx = sess->sa.hw_idx;
+
+	if (sess->udp_encap.enabled)
+		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
+
+	if (sess->esn.enabled)
+		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
+
+	template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
+	template->ctx_desc_ipsec_params =
+			calc_context_desc_cipherblock_sz(sess->block_sz) |
+			((uint8_t)(sess->icv_sz >> 2) << 3);
+}
+
+static void
+set_session_parameter(struct iavf_security_ctx *iavf_sctx,
+	struct iavf_security_session *sess,
+	struct rte_security_session_conf *conf, uint32_t sa_idx)
+{
+	sess->adapter = iavf_sctx->adapter;
+
+	sess->mode = conf->ipsec.mode;
+	sess->direction = conf->ipsec.direction;
+
+	if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		sess->type = conf->ipsec.tunnel.type;
+
+	sess->sa.spi = conf->ipsec.spi;
+	sess->sa.hw_idx = sa_idx;
+
+	if (conf->ipsec.options.esn) {
+		sess->esn.enabled = 1;
+		sess->esn.value = conf->ipsec.esn.value;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sess->udp_encap.enabled = 1;
+
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sess->block_sz = get_aead_blocksize(iavf_sctx,
+			conf->crypto_xform->aead.algo);
+		sess->iv_sz = sizeof(uint64_t); /* iv.length includes salt */
+		sess->icv_sz = conf->crypto_xform->aead.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sess->block_sz = get_cipher_blocksize(iavf_sctx,
+			conf->crypto_xform->cipher.algo);
+		sess->iv_sz = conf->crypto_xform->cipher.iv.length;
+		sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			sess->block_sz = get_auth_blocksize(iavf_sctx,
+				RTE_CRYPTO_SYM_XFORM_AUTH);
+			sess->iv_sz = conf->crypto_xform->auth.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		} else {
+			sess->block_sz = get_cipher_blocksize(iavf_sctx,
+				conf->crypto_xform->next->cipher.algo);
+			sess->iv_sz =
+				conf->crypto_xform->next->cipher.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		}
+	}
+
+	set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
+}
+
+/**
+ * Create IPsec Security Association for inline IPsec Crypto offload.
+ *
+ * 1. validate session configuration parameters
+ * 2. allocate session memory from mempool
+ * 3. add SA to hardware database
+ * 4. set session parameters
+ * 5. create packet metadata template for datapath
+ */
+static int
+iavf_ipsec_crypto_session_create(void *device,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *session,
+				 struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_session = NULL;
+	int sa_idx;
+	int ret = 0;
+
+	/* validate that all SA parameters are valid for device */
+	ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
+	if (ret)
+		return ret;
+
+	/* allocate session context */
+	if (rte_mempool_get(mempool, (void **)&iavf_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
+		return -ENOMEM;
+	}
+
+	/* add SA to hardware database */
+	sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
+	if (sa_idx < 0) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add SA (spi: %d, mode: %s, direction: %s)",
+			conf->ipsec.spi,
+			conf->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
+				"transport" : "tunnel",
+			conf->ipsec.direction ==
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+				"inbound" : "outbound");
+
+		rte_mempool_put(mempool, iavf_session);
+		return -EFAULT;
+	}
+
+	/* save data plane required session parameters */
+	set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
+
+	/* save to security session private data */
+	set_sec_session_private_data(session, iavf_session);
+
+	return 0;
+}
+
+/**
+ * Check if the IPsec crypto action is valid.
+ * The SPI must be non-zero and the SPI in the session must match the SPI
+ * value passed into the function.
+ *
+ * returns: 0 if the session is invalid or the SPI value equals zero
+ * returns: 1 if valid
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_session *sess = session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(sess == NULL || sess->adapter != adapter))
+		return false;
+
+	/* SPI value must be non-zero */
+	if (spi == 0)
+		return false;
+	/* Session SPI must match flow SPI */
+	else if (sess->sa.spi == spi) {
+		return true;
+		/**
+		 * TODO: We should add a way of tracking valid hw SA indices to
+		 * make validation less brittle
+		 */
+	}
+
+	return true;
+}
+
+/**
+ * Send virtual channel security policy add request to IES driver.
+ *
+ * The IES driver expects the SPI and destination IP address to be in host
+ * order, but the DPDK APIs use network order, therefore a byte-swap
+ * (htonl) of these parameters is required.
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg);
+	request = rte_malloc("iavf-inbound-security-policy-add-request",
+				request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* ESP SPI */
+	request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
+
+	/* Destination IP  */
+	if (is_v4) {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
+		request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
+	} else {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+		request->ipsec_data.sp_cfg->dip[0] =
+				htonl(((uint32_t *)v6_dst_addr)[0]);
+		request->ipsec_data.sp_cfg->dip[1] =
+				htonl(((uint32_t *)v6_dst_addr)[1]);
+		request->ipsec_data.sp_cfg->dip[2] =
+				htonl(((uint32_t *)v6_dst_addr)[2]);
+		request->ipsec_data.sp_cfg->dip[3] =
+				htonl(((uint32_t *)v6_dst_addr)[3]);
+	}
+
+	request->ipsec_data.sp_cfg->drop = drop;
+
+	/** Traffic Class/Congestion Domain currently not supported */
+	request->ipsec_data.sp_cfg->set_tc = 0;
+	request->ipsec_data.sp_cfg->cgd = 0;
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
+	response = rte_malloc("iavf-inbound-security-policy-add-response",
+				response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sp_cfg_resp->rule_id;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_update);
+	request = rte_malloc("iavf-sa-update-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sa-update-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set request params */
+	request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
+	request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_resp->resp;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_update(void *device,
+		struct rte_security_session *session,
+		struct rte_security_session_conf *conf)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int rc = 0;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* update esn hi 32-bits */
+	if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
+		/**
+		 * Update ESN in hardware for inbound SA. Store in
+		 * iavf_security_session for outbound SA for use
+		 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
+		 */
+		if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+			rc = iavf_ipsec_crypto_sa_update_esn(adapter,
+					iavf_sess);
+		else
+			iavf_sess->esn.hi = conf->ipsec.esn.hi;
+	}
+
+	return rc;
+}
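+
+/**
+ * Illustrative usage sketch for the update callback above (comment only;
+ * ctx, sess, conf and esn_hi are placeholders): when the low 32 bits of
+ * the sequence number wrap, the application pushes the new ESN high word
+ * so that inbound SAs are updated in hardware and outbound SAs pick it up
+ * in iavf_ipsec_crypto_pkt_metadata_set().
+ *
+ *	conf.ipsec.options.esn = 1;
+ *	conf.ipsec.esn.hi = esn_hi;
+ *	rte_security_session_update(ctx, sess, &conf);
+ */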
+
+static int
+iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
+		struct rte_security_session *session __rte_unused,
+		struct rte_security_stats *stats __rte_unused)
+{
+	return -EOPNOTSUPP;
+}
+
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_destroy);
+	request = rte_malloc("iavf-sp-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sp-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set security policy params */
+	request->ipsec_data.sp_destroy->table_id = is_v4 ?
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+	request->ipsec_data.sp_destroy->rule_id = flow_id;
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		return response->ipsec_data.ipsec_status->status;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_destroy);
+
+	request = rte_malloc("iavf-sa-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+
+	response = rte_malloc("iavf-sa-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/**
+	 * SA delete supports deletion of 1-8 specified SAs; if the flag
+	 * field is zero, all SAs associated with the VF will be deleted.
+	 */
+	if (sess) {
+		request->ipsec_data.sa_destroy->flag = 0x1;
+		request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
+	} else {
+		request->ipsec_data.sa_destroy->flag = 0x0;
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+
+	/**
+	 * Delete status will be the same bitmask as the sa_destroy request
+	 * flag if the delete is successful
+	 */
+	if (request->ipsec_data.sa_destroy->flag !=
+			response->ipsec_data.ipsec_status->status)
+		rc = -EFAULT;
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_destroy(void *device,
+		struct rte_security_session *session)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int ret;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
+	rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
+	return ret;
+}
+
+/**
+ * Get the ESP trailer from the packet as well as calculate the total ESP
+ * trailer length, which includes the padding, the ESP trailer footer and
+ * the ICV
+ */
+static inline struct rte_esp_tail *
+iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t *esp_trailer_length)
+{
+	struct rte_esp_tail *esp_trailer;
+
+	uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
+	uint16_t offset = 0;
+
+	/**
+	 * The ICV will not be present in TSO packets as this is appended by
+	 * hardware during segment generation
+	 */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		length -= s->icv_sz;
+
+	*esp_trailer_length = length;
+
+	/**
+	 * Calculate the offset in the packet to the ESP trailer header; this
+	 * is the total packet length less the size of the ESP trailer and the
+	 * ICV length if it is present
+	 */
+	offset = rte_pktmbuf_pkt_len(m) - length;
+
+	if (m->nb_segs > 1) {
+		/* find segment which esp trailer is located */
+		while (m->data_len < offset) {
+			offset -= m->data_len;
+			m = m->next;
+		}
+	}
+
+	esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
+
+	*esp_trailer_length += esp_trailer->pad_len;
+
+	return esp_trailer;
+}
+
+static inline uint16_t
+iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t esp_tlen)
+{
+	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
+	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
+	uint16_t ol4_len = 0;		/* UDP NATT */
+	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
+	uint16_t l4_len = 0;		/* TCP/UDP/SCTP hdrs */
+	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
+
+	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ol3_len = m->outer_l3_len;
+		/**<
+		 * application provided l3len assumed to include length of
+		 * ipv4/6 hdr + ext hdrs
+		 */
+
+	if (s->udp_encap.enabled)
+		ol4_len = sizeof(struct rte_udp_hdr);
+
+	l3_len = m->l3_len;
+	l4_len = m->l4_len;
+
+	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
+			esp_hlen + l3_len + l4_len + esp_tlen);
+}
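+
+/**
+ * Worked example for the computation above (illustrative figures only):
+ * for a 1514B transport mode packet with a 14B MAC header, 20B IPv4
+ * header, 20B TCP header, 8B ESP header plus 8B IV, and an 18B trailer
+ * (2B ESP tail + 16B ICV, no padding), the L4 payload length is
+ * 1514 - (14 + 0 + 0 + 16 + 20 + 20 + 18) = 1426 bytes.
+ */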
+
+static int
+iavf_ipsec_crypto_pkt_metadata_set(void *device,
+			 struct rte_security_session *session,
+			 struct rte_mbuf *m, void *params)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_sess = session->sess_private_data;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
+	struct rte_esp_tail *esp_tail;
+	uint64_t *sqn = params;
+	uint16_t esp_trailer_length;
+
+	/* Check that the session is valid and associated with this device */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* Get dynamic metadata location from mbuf */
+	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
+		struct iavf_ipsec_crypto_pkt_metadata *);
+
+	/* Set immutable metadata values from session template */
+	memcpy(md, &iavf_sess->pkt_metadata_template,
+		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
+
+	esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
+			&esp_trailer_length);
+
+	/* Set per packet mutable metadata values */
+	md->esp_trailer_len = esp_trailer_length;
+	md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
+				iavf_sess, esp_trailer_length);
+	md->next_proto = esp_tail->next_proto;
+
+	/* If Extended SN in use set the upper 32-bits in metadata */
+	if (iavf_sess->esn.enabled && sqn != NULL)
+		md->esn = (uint32_t)(*sqn >> 32);
+
+	return 0;
+}
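+
+/**
+ * Illustrative datapath usage of the callback above (comment only; sa,
+ * ctx and sess are placeholders): before transmitting on an outbound
+ * inline crypto SA the application attaches the per-packet metadata,
+ * passing the 64-bit sequence number when ESN is enabled so that the
+ * upper 32 bits end up in the IPsec descriptor.
+ *
+ *	uint64_t sqn = sa->next_sqn++;
+ *	rte_security_set_pkt_metadata(ctx, sess, m, &sqn);
+ *	m->ol_flags |= PKT_TX_SEC_OFFLOAD;
+ */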
+
+static int
+iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
+		struct virtchnl_ipsec_cap *capability)
+{
+	/* Perform pf-vf comms */
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg);
+
+	request = rte_malloc("iavf-device-capability-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_cap);
+	response = rte_malloc("iavf-device-capability-response",
+			response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id) {
+		rc = -EFAULT;
+		goto update_cleanup;
+	}
+	memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+enum rte_crypto_auth_algorithm auth_maptbl[] = {
+	/* Hash Algorithm */
+	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
+	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
+	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
+	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
+	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
+	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
+	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
+	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
+	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
+	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+};
+
+static void
+update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
+		struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	capability->auth.algo = auth_maptbl[acap->algo_type];
+	capability->auth.block_size = acap->block_size;
+
+	capability->auth.key_size.min = acap->min_key_size;
+	capability->auth.key_size.max = acap->max_key_size;
+	capability->auth.key_size.increment = acap->inc_key_size;
+
+	capability->auth.digest_size.min = acap->min_digest_size;
+	capability->auth.digest_size.max = acap->max_digest_size;
+	capability->auth.digest_size.increment = acap->inc_digest_size;
+}
+
+enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
+	/* Cipher Algorithm */
+	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
+	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
+	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
+	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
+};
+
+static void
+update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+	capability->cipher.algo = cipher_maptbl[acap->algo_type];
+
+	capability->cipher.block_size = acap->block_size;
+
+	capability->cipher.key_size.min = acap->min_key_size;
+	capability->cipher.key_size.max = acap->max_key_size;
+	capability->cipher.key_size.increment = acap->inc_key_size;
+
+	capability->cipher.iv_size.min = acap->min_iv_size;
+	capability->cipher.iv_size.max = acap->max_iv_size;
+	capability->cipher.iv_size.increment = acap->inc_iv_size;
+}
+
+enum rte_crypto_aead_algorithm aead_maptbl[] = {
+	/* AEAD Algorithm */
+	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
+	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
+	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+};
+
+static void
+update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	capability->aead.algo = aead_maptbl[acap->algo_type];
+
+	capability->aead.block_size = acap->block_size;
+
+	capability->aead.key_size.min = acap->min_key_size;
+	capability->aead.key_size.max = acap->max_key_size;
+	capability->aead.key_size.increment = acap->inc_key_size;
+
+	capability->aead.aad_size.min = acap->min_aad_size;
+	capability->aead.aad_size.max = acap->max_aad_size;
+	capability->aead.aad_size.increment = acap->inc_aad_size;
+
+	capability->aead.iv_size.min = acap->min_iv_size;
+	capability->aead.iv_size.max = acap->max_iv_size;
+	capability->aead.iv_size.increment = acap->inc_iv_size;
+
+	capability->aead.digest_size.min = acap->min_digest_size;
+	capability->aead.digest_size.max = acap->max_digest_size;
+	capability->aead.digest_size.increment = acap->inc_digest_size;
+}
+
+/**
+ * Dynamically set crypto capabilities based on the virtchnl IPsec
+ * capabilities structure.
+ */
+int
+iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
+{
+	struct rte_cryptodev_capabilities *capabilities;
+	int i, j, number_of_capabilities = 0, ci = 0;
+
+	/* Count the total number of crypto algorithms supported */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
+		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
+
+	/**
+	 * Allocate cryptodev capabilities structure for
+	 * *number_of_capabilities* items plus one item to null terminate the
+	 * array
+	 */
+	capabilities = rte_zmalloc("crypto_cap",
+		sizeof(struct rte_cryptodev_capabilities) *
+		(number_of_capabilities + 1), 0);
+	if (capabilities == NULL)
+		return -ENOMEM;
+
+	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
+
+	/**
+	 * Iterate over each virtchnl crypto capability by crypto type and
+	 * algorithm.
+	 */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
+		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
+			switch (vch_cap->cap[i].crypto_type) {
+			case VIRTCHNL_AUTH:
+				update_auth_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_CIPHER:
+				update_cipher_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_AEAD:
+				update_aead_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			default:
+				capabilities[ci].op =
+						RTE_CRYPTO_OP_TYPE_UNDEFINED;
+				break;
+			}
+		}
+	}
+
+	iavf_sctx->crypto_capabilities = capabilities;
+	return 0;
+}
+
+/**
+ * Get security capabilities for device
+ */
+static const struct rte_security_capability *
+iavf_ipsec_crypto_capabilities_get(void *device)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	unsigned int i;
+
+	static struct rte_security_capability iavf_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 }
+			},
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	/**
+	 * Update the security capabilities struct with the runtime discovered
+	 * crypto capabilities, except for the last element of the array which
+	 * is the null termination
+	 */
+	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
+			sizeof(iavf_security_capabilities[0])) - 1); i++) {
+		iavf_security_capabilities[i].crypto_capabilities =
+			iavf_sctx->crypto_capabilities;
+	}
+
+	return iavf_security_capabilities;
+}
+
+static struct rte_security_ops iavf_ipsec_crypto_ops = {
+	.session_get_size		= iavf_ipsec_crypto_session_size_get,
+	.session_create			= iavf_ipsec_crypto_session_create,
+	.session_update			= iavf_ipsec_crypto_session_update,
+	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
+	.session_destroy		= iavf_ipsec_crypto_session_destroy,
+	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
+	.get_userdata			= NULL,
+	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
+};
+
+int
+iavf_security_ctx_create(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx;
+
+	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
+	if (sctx == NULL)
+		return -ENOMEM;
+
+	sctx->device = adapter->vf.eth_dev;
+	sctx->ops = &iavf_ipsec_crypto_ops;
+	sctx->sess_cnt = 0;
+
+	adapter->vf.eth_dev->security_ctx = sctx;
+
+	if (adapter->security_ctx == NULL) {
+		adapter->security_ctx = rte_malloc("iavf_security_ctx",
+				sizeof(struct iavf_security_ctx), 0);
+		if (adapter->security_ctx == NULL)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int
+iavf_security_init(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct rte_mbuf_dynfield pkt_md_dynfield = {
+		.name = "iavf_ipsec_crypto_pkt_metadata",
+		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
+		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
+	};
+	struct virtchnl_ipsec_cap capabilities;
+	int rc;
+
+	iavf_sctx->adapter = adapter;
+
+	iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
+	if (iavf_sctx->pkt_md_offset < 0)
+		return iavf_sctx->pkt_md_offset;
+
+	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
+	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
+	if (rc)
+		return rc;
+
+	return	iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
+			&capabilities);
+}
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	return iavf_sctx->pkt_md_offset;
+}
+
+int
+iavf_security_ctx_destroy(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx  = adapter->vf.eth_dev->security_ctx;
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	if (iavf_sctx == NULL)
+		return -ENODEV;
+
+	/* TODO: Add resources cleanup */
+
+	/* free and reset security data structures */
+	rte_free(iavf_sctx);
+	rte_free(sctx);
+
+	iavf_sctx = NULL;
+	sctx = NULL;
+
+	return 0;
+}
+
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
+{
+	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
+
+	/** Capability check for IPsec Crypto */
+	if (resources && (resources->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
+		return true;
+
+	return false;
+}
+
+#define IAVF_IPSEC_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
+enum iavf_ipsec_flow_pt_type {
+	IAVF_PATTERN_ESP = 1,
+	IAVF_PATTERN_AH,
+	IAVF_PATTERN_UDP_ESP,
+};
+enum iavf_ipsec_flow_pt_ip_ver {
+	IAVF_PATTERN_IPV4 = 1,
+	IAVF_PATTERN_IPV6,
+};
+
+#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
+#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
+#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
+
+static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
+	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
+};
+
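+/**
+ * Illustrative flow rule accepted by this parser (comment only; spi,
+ * ipv4_spec and sess are placeholders): match ESP over IPv4 on the SPI and
+ * bind the matching packets to an inline crypto security session.
+ *
+ *	struct rte_flow_item_esp esp_spec = {
+ *		.hdr.spi = rte_cpu_to_be_32(spi),
+ *	};
+ *	struct rte_flow_item pattern[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ESP, .spec = &esp_spec },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};
+ *	struct rte_flow_action actions[] = {
+ *		{ .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = sess },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_END },
+ *	};
+ */
+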
+struct iavf_ipsec_flow_item {
+	uint64_t id;
+	uint8_t is_ipv4;
+	uint32_t spi;
+	struct rte_ether_hdr eth_hdr;
+	union {
+		struct rte_ipv4_hdr ipv4_hdr;
+		struct rte_ipv6_hdr ipv6_hdr;
+	};
+	struct rte_udp_hdr udp_hdr;
+};
+
+static void
+parse_eth_item(const struct rte_flow_item_eth *item,
+		struct rte_ether_hdr *eth)
+{
+	memcpy(eth->src_addr.addr_bytes,
+			item->src.addr_bytes, sizeof(eth->src_addr));
+	memcpy(eth->dst_addr.addr_bytes,
+			item->dst.addr_bytes, sizeof(eth->dst_addr));
+}
+
+static void
+parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
+		struct rte_ipv4_hdr *ipv4)
+{
+	ipv4->src_addr = item->hdr.src_addr;
+	ipv4->dst_addr = item->hdr.dst_addr;
+}
+
+static void
+parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
+		struct rte_ipv6_hdr *ipv6)
+{
+	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
+	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+}
+
+static void
+parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
+{
+	udp->dst_port = item->hdr.dst_port;
+	udp->src_port = item->hdr.src_port;
+}
+
+static int
+has_security_action(const struct rte_flow_action actions[],
+	const void **session)
+{
+	/* only {SECURITY; END} supported */
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
+		*session = actions[0].conf;
+		return true;
+	}
+	return false;
+}
+
+static struct iavf_ipsec_flow_item *
+iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t type)
+{
+	const void *session;
+	struct iavf_ipsec_flow_item
+		*ipsec_flow = rte_malloc("security-flow-rule",
+		sizeof(struct iavf_ipsec_flow_item), 0);
+	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
+	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
+
+	if (ipsec_flow == NULL)
+		return NULL;
+
+	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
+
+	if (pattern[0].spec)
+		parse_eth_item((const struct rte_flow_item_eth *)
+				pattern[0].spec, &ipsec_flow->eth_hdr);
+
+	switch (p_type) {
+	case IAVF_PATTERN_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[2].spec)->hdr.spi;
+		break;
+	case IAVF_PATTERN_AH:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_ah *)
+					pattern[2].spec)->spi;
+		break;
+	case IAVF_PATTERN_UDP_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		parse_udp_item((const struct rte_flow_item_udp *)
+				pattern[2].spec,
+			&ipsec_flow->udp_hdr);
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[3].spec)->hdr.spi;
+		break;
+	default:
+		goto flow_cleanup;
+	}
+
+	if (!has_security_action(actions, &session))
+		goto flow_cleanup;
+
+	if (!iavf_ipsec_crypto_action_valid(ethdev, session,
+			ipsec_flow->spi))
+		goto flow_cleanup;
+
+	return ipsec_flow;
+
+flow_cleanup:
+	rte_free(ipsec_flow);
+	return NULL;
+}
+
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser;
+
+static int
+iavf_ipsec_flow_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
+		parser = &iavf_ipsec_flow_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
+}
+
+static int
+iavf_ipsec_flow_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = meta;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	if (ipsec_flow->is_ipv4) {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			1,
+			ipsec_flow->ipv4_hdr.dst_addr,
+			NULL,
+			0);
+	} else {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			0,
+			0,
+			ipsec_flow->ipv6_hdr.dst_addr,
+			0);
+	}
+
+	if (ipsec_flow->id < 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Failed to add SA.");
+		return -rte_errno;
+	}
+
+	flow->rule = ipsec_flow;
+
+	return 0;
+}
+
+static int
+iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	iavf_ipsec_crypto_security_policy_delete(ad,
+			ipsec_flow->is_ipv4, ipsec_flow->id);
+	rte_free(ipsec_flow);
+	return 0;
+}
+
+static struct iavf_flow_engine iavf_ipsec_flow_engine = {
+	.init = iavf_ipsec_flow_init,
+	.uninit = iavf_ipsec_flow_uninit,
+	.create = iavf_ipsec_flow_create,
+	.destroy = iavf_ipsec_flow_destroy,
+	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
+};
+
+static int
+iavf_ipsec_flow_parse(struct iavf_adapter *ad,
+		       struct iavf_pattern_match_item *array,
+		       uint32_t array_len,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta,
+		       struct rte_flow_error *error)
+{
+	struct iavf_pattern_match_item *item = NULL;
+	int ret = -1;
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (item && item->meta) {
+		uint32_t type = (uint64_t)(item->meta);
+		struct iavf_ipsec_flow_item *fi =
+				iavf_ipsec_flow_item_parse(ad->vf.eth_dev,
+						pattern, actions, type);
+		if (fi && meta) {
+			*meta = fi;
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser = {
+	.engine = &iavf_ipsec_flow_engine,
+	.array = iavf_ipsec_flow_pattern,
+	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
+	.parse_pattern_action = iavf_ipsec_flow_parse,
+	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
+};
+
+RTE_INIT(iavf_ipsec_flow_engine_register)
+{
+	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
+}
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
new file mode 100644
index 0000000000..4e4c8798ec
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_H_
+#define _IAVF_IPSEC_CRYPTO_H_
+
+#include <rte_security.h>
+
+#include "iavf.h"
+
+struct iavf_tx_ipsec_desc {
+	union {
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le16 l4payload_length;
+			__le32 esn;
+			__le16 trailer_length;
+			u8 type:4;
+			u8 rsv:1;
+			u8 udp:1;
+			u8 ivlen:2;
+			u8 next_header;
+			__le16 ipv6_ext_hdr_length;
+			__le32 said;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
+			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
+
+/* Initialization Vector Length type */
+enum iavf_ipsec_iv_len {
+	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
+	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
+	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
+	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
+};
+
+
+/* IPsec Crypto Packet Metadata offload flags */
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
+
+/**
+ * Packet metadata structure used to hold the parameters required by the iAVF
+ * transmit data path. The parameters are set per packet by calling the
+ * rte_security_set_pkt_metadata() API.
+ */
+struct iavf_ipsec_crypto_pkt_metadata {
+	uint32_t sa_idx;                /* SA hardware index (20b/4B) */
+
+	uint8_t ol_flags;		/* flags (1B) */
+	uint8_t len_iv;			/* IV length (2b/1B) */
+	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
+	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
+
+	uint16_t l4_payload_len;	/* L4 payload length */
+	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extension headers len (5b/1B) */
+	uint8_t next_proto;		/* Next Protocol (8b/1B) */
+
+	uint32_t esn;		        /* Extended Sequence Number (32b/4B) */
+} __rte_packed;
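+
+/**
+ * The packed layout above is 16 bytes in total, so the per-packet template
+ * copy done in iavf_ipsec_crypto_pkt_metadata_set() amounts to a single
+ * 16 byte memcpy into the mbuf dynamic field registered at security init.
+ */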
+
+/**
+ * Inline IPsec Crypto offload is supported
+ */
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
+
+/**
+ * Create security context
+ */
+int iavf_security_ctx_create(struct iavf_adapter *adapter);
+
+/**
+ * Initialize security context
+ */
+int iavf_security_init(struct iavf_adapter *adapter);
+
+/**
+ * Set security capabilities
+ */
+int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchnl_capabilities);
+
+
+int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+/**
+ * Destroy security context
+ */
+int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
+
+/**
+ * Verify that the inline IPsec Crypto action is valid for this device
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi);
+
+/**
+ * Add inbound security policy rule to hardware
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop);
+
+/**
+ * Delete inbound security policy rule from hardware
+ */
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id);
+
+#endif /* _IAVF_IPSEC_CRYPTO_H_ */
diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
new file mode 100644
index 0000000000..70ce8dd638
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC MAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 },
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* ChaCha20-Poly1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 16,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	}
+};
+
+
+#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 11b7fea36f..28cc834caf 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -27,6 +27,7 @@
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
+#include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
@@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 uint8_t
 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
@@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
 		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
 		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
 		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
+				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
 	};
 
 	return flex_type < RTE_DIM(rxdid_map) ?
@@ -508,6 +512,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields =
 			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
+	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+		rxq->xtr_ol_flag =
+			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+		rxq->rxd_to_pkt_fields =
+			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
 	case IAVF_RXDID_COMMS_OVS_1:
 		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
 		break;
@@ -692,6 +702,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		       const struct rte_eth_txconf *tx_conf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
@@ -736,9 +748,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		struct virtchnl_vlan_supported_caps *insertion_support =
-			&vf->vlan_v2_caps.offloads.insertion_support;
+			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
 		uint32_t insertion_cap;
 
 		if (insertion_support->outer)
@@ -762,6 +774,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
+	if (iavf_ipsec_crypto_supported(adapter))
+		txq->ipsec_crypto_pkt_md_offset =
+			iavf_security_get_pkt_md_offset(adapter);
+
 	/* Allocate software ring */
 	txq->sw_ring =
 		rte_zmalloc_socket("iavf tx sw ring",
@@ -1081,6 +1097,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
 #endif
 }
 
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
+
+	mb->dynfield1[0] = desc->ipsec_said &
+			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
+}
+
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp,
+			  struct iavf_ipsec_crypto_stats *stats)
+{
+	uint16_t status1 = rte_le_to_cpu_16(rxdp->wb.status_error1);
+
+	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
+		uint16_t ipsec_status;
+
+		mb->ol_flags |= PKT_RX_SEC_OFFLOAD;
+
+		ipsec_status = status1 &
+			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
+
+		if (unlikely(ipsec_status !=
+			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
+			mb->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+
+			switch (ipsec_status) {
+			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
+				stats->ierrors.sad_miss++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
+				stats->ierrors.not_processed++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
+				stats->ierrors.icv_check++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
+				stats->ierrors.ipsec_length++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
+				stats->ierrors.misc++;
+				break;
+			}
+
+			stats->ierrors.count++;
+			return;
+		}
+
+		stats->icount++;
+		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
+
+		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
+			ipsec_status !=
+				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
+			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
+	}
+}
+
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
@@ -1399,6 +1479,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1541,6 +1623,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1779,6 +1863,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+				&rxq->stats.ipsec_crypto);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2091,6 +2177,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
 	*field |= cmd;
 }
 
+static inline void
+iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+{
+	uint64_t ipsec_field =
+		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+
+	*field |= ipsec_field;
+}
+
+
 static inline void
 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 		const struct rte_mbuf *m)
@@ -2123,15 +2221,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 
 static inline uint16_t
 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
-	struct rte_mbuf *m)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
 	uint64_t segmentation_field = 0;
 	uint64_t total_length = 0;
 
-	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD) {
+		total_length = ipsec_md->l4_payload_len;
+	} else {
+		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
-	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
-		total_length -= m->outer_l3_len;
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			total_length -= m->outer_l3_len;
+	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
 	if (!m->l4_len || !m->tso_segsz)
@@ -2160,7 +2262,8 @@ struct iavf_tx_context_desc_qws {
 
 static inline void
 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
-	struct rte_mbuf *m, uint16_t *tlen)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
+	uint16_t *tlen)
 {
 	volatile struct iavf_tx_context_desc_qws *desc_qws =
 			(volatile struct iavf_tx_context_desc_qws *)desc;
@@ -2172,8 +2275,13 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 
 	/* fill segmentation field */
 	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		/* fill IPsec field */
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
+				ipsec_md);
+
 		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
-				m);
+				m, ipsec_md);
 	}
 
 	/* fill tunnelling field */
@@ -2187,6 +2295,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 }
 
 
+static inline void
+iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
+	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
+{
+	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
+		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
+		((uint64_t)md->esp_trailer_len <<
+				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
+
+	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
+		((uint64_t)md->next_proto <<
+				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
+		((uint64_t)(md->len_iv & 0x3) <<
+				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
+		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+				1ULL : 0ULL) <<
+				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
+		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+
+	/**
+	 * TODO: Pre-calculate this in the Session initialization
+	 *
+	 * Calculate IPsec length required in data descriptor func when TSO
+	 * offload is enabled
+	 */
+	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
+			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+			sizeof(struct rte_udp_hdr) : 0);
+}
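+
+/**
+ * Note on the *ipsec_len computed above: as used in this function, len_iv
+ * carries the iavf_ipsec_iv_len code in its two low bits and the IV size
+ * in bytes in the remaining upper bits, so (len_iv >> 2) recovers the byte
+ * count. As an illustrative example, an AES-GCM SA with an 8B IV and UDP
+ * encapsulation (NAT-T) gives 8 (ESP header) + 8 (IV) + 8 (UDP header) =
+ * 24 bytes of IPsec header overhead per segment.
+ */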
+
 static inline void
 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		struct rte_mbuf *m)
@@ -2298,6 +2438,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
 }
 
 
+static struct iavf_ipsec_crypto_pkt_metadata *
+iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
+		struct rte_mbuf *m)
+{
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
+				struct iavf_ipsec_crypto_pkt_metadata *);
+
+	return NULL;
+}
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -2326,7 +2477,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	for (idx = 0; idx < nb_pkts; idx++) {
 		volatile struct iavf_tx_desc *ddesc;
-		uint16_t nb_desc_ctx;
+		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
+
+		uint16_t nb_desc_ctx, nb_desc_ipsec;
 		uint16_t nb_desc_data, nb_desc_required;
 		uint16_t tlen = 0, ipseclen = 0;
 		uint64_t ddesc_template = 0;
@@ -2336,16 +2489,23 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+		/**
+		 * Get metadata for ipsec crypto from mbuf dynamic fields if
+		 * security offload is specified.
+		 */
+		ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);
+
 		nb_desc_data = mb->nb_segs;
 		nb_desc_ctx = !!(mb->ol_flags &
 			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
+		nb_desc_ipsec = !!(mb->ol_flags & PKT_TX_SEC_OFFLOAD);
 
 		/**
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx;
+		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2396,7 +2556,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txe->mbuf = NULL;
 			}
 
-			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
 			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
 			txe->last_id = desc_idx_last;
@@ -2404,7 +2564,27 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe = txn;
 			}
 
+		if (nb_desc_ipsec) {
+			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
+				(volatile struct iavf_tx_ipsec_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
+			if (txe->mbuf) {
+				rte_pktmbuf_free_seg(txe->mbuf);
+				txe->mbuf = NULL;
+			}
+
+			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
+		}
 
 		mb_seg = mb;
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index d05a525ef9..500ffb2d06 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -25,7 +25,8 @@
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_TCP_TSO)
+		DEV_TX_OFFLOAD_TCP_TSO |		 \
+		DEV_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
 		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
@@ -47,7 +48,7 @@
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          88
+#define IAVF_MIN_TSO_MSS          256
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
@@ -65,7 +66,8 @@
 		PKT_TX_VLAN_PKT |		 \
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
-		PKT_TX_TCP_SEG)
+		PKT_TX_TCP_SEG |		 \
+		DEV_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
@@ -163,6 +165,24 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_rx_queue_stats {
+	uint64_t reserved;
+	struct iavf_ipsec_crypto_stats ipsec_crypto;
+};
+
 /* Structure associated with each Rx queue. */
 struct iavf_rx_queue {
 	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
@@ -211,6 +231,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 				/* handle flexible descriptor by RXDID */
+	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
 };
 
@@ -245,6 +266,7 @@ struct iavf_tx_queue {
 	uint64_t offloads;
 	uint16_t next_dd;              /* next to set RS, for VPMD */
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
+	uint16_t ipsec_crypto_pkt_md_offset;
 
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
@@ -347,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
 	} flex_ts;
 };
 
+/* Rx Flex Descriptor
+ * RxDID Profile ID 24 Inline IPsec
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: Inline IPsec SAID lower 16-bits
+ * Flex-field 5: Inline IPsec SAID upper 16-bits
+ */
+struct iavf_32b_rx_flex_desc_comms_ipsec {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	__le32 ipsec_said;
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -366,6 +422,7 @@ enum iavf_rxdid {
 	IAVF_RXDID_COMMS_AUX_TCP	= 21,
 	IAVF_RXDID_COMMS_OVS_1		= 22,
 	IAVF_RXDID_COMMS_OVS_2		= 23,
+	IAVF_RXDID_COMMS_IPSEC_CRYPTO	= 24,
 	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
 	IAVF_RXDID_LAST			= 63,
 };
@@ -393,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
 
 enum iavf_rx_flex_desc_status_error_1_bits {
 	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
 	/* [10:6] reserved */
 	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
 	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
@@ -405,6 +466,23 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
+
+enum iavf_rx_flex_desc_ipsec_crypto_status {
+	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
+	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
+	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
+	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
+	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
+	/* Reserved */
+	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
+};
+
+
 
 #define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
 #define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
@@ -672,6 +750,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	case IAVF_TX_DESC_DTYPE_CONTEXT:
 		name = "Tx_context_desc";
 		break;
+	case IAVF_TX_DESC_DTYPE_IPSEC:
+		name = "Tx_IPsec_desc";
+		break;
 	default:
 		name = "unknown_desc";
 		break;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index da4654957a..4827313ee7 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1774,3 +1774,32 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 
 	return 0;
 }
+
+
+
+int
+iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
+	args.in_args = msg;
+	args.in_args_size = msg_len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 1);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+				"OP_INLINE_IPSEC_CRYPTO");
+		return err;
+	}
+
+	memcpy(resp_msg, args.out_buffer, resp_msg_len);
+
+	return 0;
+}
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 36a82e3faa..5eb230f687 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -5,7 +5,7 @@
 cflags += ['-Wno-strict-aliasing']
 
 includes += include_directories('../../common/iavf')
-deps += ['common_iavf']
+deps += ['common_iavf', 'security', 'cryptodev']
 
 sources = files(
         'iavf_ethdev.c',
@@ -15,6 +15,7 @@ sources = files(
         'iavf_fdir.c',
         'iavf_hash.c',
         'iavf_tm.c',
+        'iavf_ipsec_crypto.c',
 )
 
 if arch_subdir == 'x86'
diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
index 3a045040f1..7426eb9be3 100644
--- a/drivers/net/iavf/rte_pmd_iavf.h
+++ b/drivers/net/iavf/rte_pmd_iavf.h
@@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 /**
  * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map
index f3efe756cf..97f0f87311 100644
--- a/drivers/net/iavf/version.map
+++ b/drivers/net/iavf/version.map
@@ -13,4 +13,7 @@ EXPERIMENTAL {
 	rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+	# added in 21.11
+	rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v10 5/7] net/iavf: add xstats support for inline IPsec crypto
  2021-10-19  9:23 ` [dpdk-dev] [PATCH v10 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (3 preceding siblings ...)
  2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-19  9:23   ` Radu Nicolau
  2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
  2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-19  9:23 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add per-queue counters for maintaining inline IPsec crypto offload
statistics, which can be retrieved through
rte_security_session_stats_get(), with more detailed error counters
available through the rte_ethdev xstats API.
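
As a hypothetical usage sketch (not part of this patch; port id 0 and
the helper variables are placeholders), the new counters can be read
through the generic xstats API and filtered by their name prefix:

	int nb = rte_eth_xstats_get_names(0, NULL, 0);
	struct rte_eth_xstat_name *names = calloc(nb, sizeof(*names));
	struct rte_eth_xstat *vals = calloc(nb, sizeof(*vals));

	rte_eth_xstats_get_names(0, names, nb);
	rte_eth_xstats_get(0, vals, nb);

	/* print only the inline IPsec counters added by this patch */
	for (int i = 0; i < nb; i++)
		if (strncmp(names[i].name, "inline_ipsec_crypto_", 20) == 0)
			printf("%s: %" PRIu64 "\n", names[i].name, vals[i].value);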

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h        | 21 ++++++++-
 drivers/net/iavf/iavf_ethdev.c | 84 ++++++++++++++++++++++++++++------
 drivers/net/iavf/iavf_rxtx.h   | 12 -----
 3 files changed, 89 insertions(+), 28 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index e98c42ba08..90a7344bd5 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -96,6 +96,25 @@ struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
 
+
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_eth_xstats {
+	struct virtchnl_eth_stats eth_stats;
+	struct iavf_ipsec_crypto_stats ips_stats;
+};
+
 /* Structure that defines a VSI, associated with a adapter. */
 struct iavf_vsi {
 	struct iavf_adapter *adapter; /* Backreference to associated adapter */
@@ -105,7 +124,7 @@ struct iavf_vsi {
 	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
 	uint16_t base_vector;
 	uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
-	struct virtchnl_eth_stats eth_stats_offset;
+	struct iavf_eth_xstats eth_stats_offset;
 };
 
 struct rte_flow;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index ac66e383a6..25476965ab 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -90,6 +90,7 @@ static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);
 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n);
 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -145,21 +146,37 @@ struct rte_iavf_xstats_name_off {
 	unsigned int offset;
 };
 
+#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
-	{"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
-	{"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
-	{"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
-	{"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
-	{"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
+	{"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
+	{"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
+	{"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
+	{"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
+	{"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
 	{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
 		rx_unknown_protocol)},
-	{"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
-	{"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
-	{"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
-	{"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
-	{"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
-	{"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
+	{"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
+	{"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
+	{"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
+	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
+	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
+	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+
+	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
+	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
+	{"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
+	{"inline_ipsec_crypto_ierrors_sad_lookup",
+			_OFF_OF(ips_stats.ierrors.sad_miss)},
+	{"inline_ipsec_crypto_ierrors_not_processed",
+			_OFF_OF(ips_stats.ierrors.not_processed)},
+	{"inline_ipsec_crypto_ierrors_icv_fail",
+			_OFF_OF(ips_stats.ierrors.icv_check)},
+	{"inline_ipsec_crypto_ierrors_length",
+			_OFF_OF(ips_stats.ierrors.ipsec_length)},
+	{"inline_ipsec_crypto_ierrors_misc",
+			_OFF_OF(ips_stats.ierrors.misc)},
 };
+#undef _OFF_OF
 
 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
 		sizeof(rte_iavf_stats_strings[0]))
@@ -177,7 +194,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.stats_reset                = iavf_dev_stats_reset,
 	.xstats_get                 = iavf_dev_xstats_get,
 	.xstats_get_names           = iavf_dev_xstats_get_names,
-	.xstats_reset               = iavf_dev_stats_reset,
+	.xstats_reset               = iavf_dev_xstats_reset,
 	.promiscuous_enable         = iavf_dev_promiscuous_enable,
 	.promiscuous_disable        = iavf_dev_promiscuous_disable,
 	.allmulticast_enable        = iavf_dev_allmulticast_enable,
@@ -1544,7 +1561,7 @@ iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
 static void
 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
 {
-	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
 
 	iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
 	iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
@@ -1606,7 +1623,18 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev)
 		return ret;
 
 	/* set stats offset base on current values */
-	vsi->eth_stats_offset = *pstats;
+	vsi->eth_stats_offset.eth_stats = *pstats;
+
+	return 0;
+}
+
+static int
+iavf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_dev_stats_reset(dev);
+	memset(&vf->vsi.eth_stats_offset, 0, sizeof(struct iavf_eth_xstats));
 
 	return 0;
 }
@@ -1626,6 +1654,27 @@ static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return IAVF_NB_XSTATS;
 }
 
+static void
+iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
+		struct iavf_ipsec_crypto_stats *ips)
+{
+	uint16_t idx;
+	for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
+		struct iavf_rx_queue *rxq;
+		struct iavf_ipsec_crypto_stats *stats;
+		rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
+		stats = &rxq->stats.ipsec_crypto;
+		ips->icount += stats->icount;
+		ips->ibytes += stats->ibytes;
+		ips->ierrors.count += stats->ierrors.count;
+		ips->ierrors.sad_miss += stats->ierrors.sad_miss;
+		ips->ierrors.not_processed += stats->ierrors.not_processed;
+		ips->ierrors.icv_check += stats->ierrors.icv_check;
+		ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
+		ips->ierrors.misc += stats->ierrors.misc;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1636,6 +1685,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_vsi *vsi = &vf->vsi;
 	struct virtchnl_eth_stats *pstats = NULL;
+	struct iavf_eth_xstats iavf_xtats = {0};
 
 	if (n < IAVF_NB_XSTATS)
 		return IAVF_NB_XSTATS;
@@ -1648,11 +1698,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 		return 0;
 
 	iavf_update_stats(vsi, pstats);
+	iavf_xtats.eth_stats = *pstats;
+
+	if (iavf_ipsec_crypto_supported(adapter))
+		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
-		xstats[i].value = *(uint64_t *)(((char *)pstats) +
+		xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
 			rte_iavf_stats_strings[i].offset);
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 500ffb2d06..5e39d2bc96 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -165,18 +165,6 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
-struct iavf_ipsec_crypto_stats {
-	uint64_t icount;
-	uint64_t ibytes;
-	struct {
-		uint64_t count;
-		uint64_t sad_miss;
-		uint64_t not_processed;
-		uint64_t icv_check;
-		uint64_t ipsec_length;
-		uint64_t misc;
-	} ierrors;
-};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v10 6/7] net/iavf: add watchdog for VFLR
  2021-10-19  9:23 ` [dpdk-dev] [PATCH v10 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (4 preceding siblings ...)
  2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-10-19  9:23   ` Radu Nicolau
  2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-19  9:23 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add a watchdog to the iAVF PMD which monitors the VFLR register. If
the device is not already in reset and a VF reset in progress is
detected, notify the user through a callback and move the device into
the reset state. If the device is already in reset, poll for completion
of the reset.

The watchdog is disabled by default; to enable it, set
IAVF_DEV_WATCHDOG_PERIOD to a non-zero value (in microseconds).
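
For example, enabling a 500 ms polling period is a one-line build-time
change (the value shown here is only an illustration):

	/* drivers/net/iavf/iavf.h: period in microseconds passed to
	 * rte_eal_alarm_set(); 0 keeps the watchdog disabled
	 */
	#define IAVF_DEV_WATCHDOG_PERIOD     500000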

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h        |  5 ++
 drivers/net/iavf/iavf_ethdev.c | 94 ++++++++++++++++++++++++++++++++++
 2 files changed, 99 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 90a7344bd5..f06979b4da 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -31,6 +31,8 @@
 
 #define IAVF_NUM_MACADDR_MAX      64
 
+#define IAVF_DEV_WATCHDOG_PERIOD     0
+
 #define IAVF_DEFAULT_RX_PTHRESH      8
 #define IAVF_DEFAULT_RX_HTHRESH      8
 #define IAVF_DEFAULT_RX_WTHRESH      0
@@ -216,6 +218,9 @@ struct iavf_info {
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
+	/** iAVF watchdog enable */
+	bool watchdog_enabled;
+
 	/* Event from pf */
 	bool dev_closed;
 	bool link_up;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 25476965ab..7221e342ad 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -25,6 +25,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_alarm.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
@@ -240,6 +241,91 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+__rte_unused
+static int
+iavf_vfr_inprogress(struct iavf_hw *hw)
+{
+	int inprogress = 0;
+
+	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
+		VIRTCHNL_VFR_INPROGRESS)
+		inprogress = 1;
+
+	if (inprogress)
+		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
+
+	return inprogress;
+}
+
+__rte_unused
+static void
+iavf_dev_watchdog(void *cb_arg)
+{
+	struct iavf_adapter *adapter = cb_arg;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	int vfr_inprogress = 0, rc = 0;
+
+	/* check if watchdog has been disabled since last call */
+	if (!adapter->vf.watchdog_enabled)
+		return;
+
+	/* If in reset then poll vfr_inprogress register for completion */
+	if (adapter->vf.vf_reset) {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (!vfr_inprogress) {
+			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+				adapter->vf.eth_dev->data->name);
+			adapter->vf.vf_reset = false;
+		}
+	/* If not in reset then poll vfr_inprogress register for VFLR event */
+	} else {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (vfr_inprogress) {
+			PMD_DRV_LOG(INFO,
+				"VF \"%s\" reset event detected by watchdog",
+				adapter->vf.eth_dev->data->name);
+
+			/* enter reset state with VFLR event */
+			adapter->vf.vf_reset = true;
+
+			rte_eth_dev_callback_process(adapter->vf.eth_dev,
+				RTE_ETH_EVENT_INTR_RESET, NULL);
+		}
+	}
+
+	/* re-alarm watchdog */
+	rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, cb_arg);
+
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed \"%s\" to reset device watchdog alarm",
+			adapter->vf.eth_dev->data->name);
+}
+
+static void
+iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Enabling device watchdog");
+	adapter->vf.watchdog_enabled = true;
+	if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, (void *)adapter))
+		PMD_DRV_LOG(ERR, "Failed to enable device watchdog");
+#endif
+}
+
+static void
+iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Disabling device watchdog");
+	adapter->vf.watchdog_enabled = false;
+#endif
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
@@ -2481,6 +2567,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	iavf_default_rss_disable(adapter);
 
+
+	/* Start device watchdog */
+	iavf_dev_watchdog_enable(adapter);
+
+
 	return 0;
 
 flow_init_err:
@@ -2564,6 +2655,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
 		vf->vf_reset = false;
 
+	/* disable watchdog */
+	iavf_dev_watchdog_disable(adapter);
+
 	return ret;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v10 7/7] net/iavf: update doc with inline crypto support
  2021-10-19  9:23 ` [dpdk-dev] [PATCH v10 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (5 preceding siblings ...)
  2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
@ 2021-10-19  9:23   ` Radu Nicolau
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-19  9:23 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Haiyue Wang
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Update the PMD doc, feature matrix and release notes with the
new inline crypto feature.

Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 doc/guides/nics/features/iavf.ini      |  2 ++
 doc/guides/nics/intel_vf.rst           | 10 ++++++++++
 doc/guides/rel_notes/release_21_11.rst |  1 +
 3 files changed, 13 insertions(+)

diff --git a/doc/guides/nics/features/iavf.ini b/doc/guides/nics/features/iavf.ini
index d00ca934c3..78f649c25f 100644
--- a/doc/guides/nics/features/iavf.ini
+++ b/doc/guides/nics/features/iavf.ini
@@ -28,6 +28,7 @@ L4 checksum offload  = P
 Packet type parsing  = Y
 Rx descriptor status = Y
 Tx descriptor status = Y
+Inline crypto        = Y
 Basic stats          = Y
 Multiprocess aware   = Y
 FreeBSD              = Y
@@ -64,3 +65,4 @@ mark                 = Y
 passthru             = Y
 queue                = Y
 rss                  = Y
+security             = Y
diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index 2efdd1a41b..038e7c02b6 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -633,3 +633,13 @@ Windows Support
 
 *   To load NetUIO driver, follow the steps mentioned in `dpdk-kmods repository
     <https://git.dpdk.org/dpdk-kmods/tree/windows/netuio/README.rst>`_.
+
+
+Inline IPsec Support
+--------------------
+
+*   IAVF PMD supports inline crypto processing depending on the underlying
+    hardware crypto capabilities. IPsec Security Gateway Sample Application
+    supports inline IPsec processing for IAVF PMD. For more details see the
+    IPsec Security Gateway Sample Application and Security library
+    documentation.
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index bd6a388c9d..9f258d481d 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -112,6 +112,7 @@ New Features
 
   * Added Intel iavf support on Windows.
   * Added IPv4 and L4 (TCP/UDP/SCTP) checksum hash support in RSS flow.
+  * Added Intel iavf inline crypto support.
 
 * **Updated Intel ice driver.**
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec inline crypto support
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (12 preceding siblings ...)
  2021-10-19  9:23 ` [dpdk-dev] [PATCH v10 0/7] iavf: add iAVF IPsec " Radu Nicolau
@ 2021-10-26 10:38 ` Radu Nicolau
  2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 1/7] common/iavf: " Radu Nicolau
                     ` (7 more replies)
  2021-10-26 13:56 ` [dpdk-dev] [PATCH v12 " Radu Nicolau
                   ` (2 subsequent siblings)
  16 siblings, 8 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 10:38 UTC (permalink / raw)
  Cc: dev, declan.doherty, abhijit.sinha, jingjing.wu, qi.z.zhang,
	beilei.xing, bruce.richardson, konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel mode over IPv4 and IPv6, as well as offload of ESP over UDP,
in conjunction with TSO for UDP and TCP flows.
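
As a rough application-side sketch of consuming the offload (a minimal
sketch only: port_id, sess_mp, sess_priv_mp and aead_xform are
placeholders, and the exact rte_security_session_create() prototype
varies between DPDK releases):

	void *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);

	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1000,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
		},
		.crypto_xform = &aead_xform,	/* e.g. AES-GCM */
	};

	struct rte_security_session *sess =
		rte_security_session_create(sec_ctx, &conf, sess_mp, sess_priv_mp);

	/* per packet on the TX path: attach the SA and request the offload */
	rte_security_set_pkt_metadata(sec_ctx, sess, mbuf, NULL);
	mbuf->ol_flags |= PKT_TX_SEC_OFFLOAD;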

Radu Nicolau (7):
  common/iavf: add iAVF IPsec inline crypto support
  net/iavf: rework tx path
  net/iavf: add support for asynchronous virt channel messages
  net/iavf: add iAVF IPsec inline crypto support
  net/iavf: add xstats support for inline IPsec crypto
  net/iavf: add watchdog for VFLR
  net/iavf: update doc with inline crypto support

 doc/guides/nics/features/iavf.ini             |    2 +
 doc/guides/nics/intel_vf.rst                  |   10 +
 doc/guides/rel_notes/release_21_11.rst        |    1 +
 drivers/common/iavf/iavf_type.h               |    1 +
 drivers/common/iavf/virtchnl.h                |   17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
 drivers/net/iavf/iavf.h                       |   52 +-
 drivers/net/iavf/iavf_ethdev.c                |  219 +-
 drivers/net/iavf/iavf_generic_flow.c          |   15 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1894 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  710 ++++--
 drivers/net/iavf/iavf_rxtx.h                  |  220 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  167 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 20 files changed, 4101 insertions(+), 322 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

-- 
v2: small updates and fixes in the flow related section
v3: split the huge patch and address feedback
v4: small changes due to dependency changes
v5: updated the watchdog patch
v6: rebased and updated the common section
v7: fixed TSO issue and disabled watchdog by default
v8: rebased to next-net-intel and added doc updates
v9: fixed IV len for AEAD and GMAC
v10: removed blank lines at EOF
v11: rebased patchset
 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v11 1/7] common/iavf: add iAVF IPsec inline crypto support
  2021-10-26 10:38 ` [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec " Radu Nicolau
@ 2021-10-26 10:38   ` Radu Nicolau
  2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 2/7] net/iavf: rework tx path Radu Nicolau
                     ` (6 subsequent siblings)
  7 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 10:38 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/common/iavf/iavf_type.h             |   1 +
 drivers/common/iavf/virtchnl.h              |  17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h | 553 ++++++++++++++++++++
 3 files changed, 569 insertions(+), 2 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 73dfb47e70..51267ca3b3 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -723,6 +723,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 067f715945..269578f7c0 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -38,6 +38,8 @@
  * value in current and future projects
  */
 
+#include "virtchnl_inline_ipsec.h"
+
 /* Error Codes */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
@@ -133,7 +135,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, and 37 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
 	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
 	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
@@ -225,6 +228,8 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 		return "VIRTCHNL_OP_DCF_CMD_DESC";
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
@@ -385,7 +390,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
-	/* BIT(8) is reserved */
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
@@ -2291,6 +2296,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..1e9134501e
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all valid, if certain field is invalid, set 1 for all bits */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* vf record the capability of crypto from the virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF pass virtchnl_ipsec_cap to PF
+ * and PF return capability of ipsec from virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF send this SA configuration to PF using virtchnl;
+ * PF create SA as configuration and PF driver will return
+ * an unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-reply window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF send configuration of index of SA to PF
+ * PF will update SA according to configuration
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF send configuration of index of SA to PF
+ * PF will destroy SA according to configuration
+ * flag bitmap indicate all SA or just selected SA will
+ * be destroyed
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF send this SA configuration to PF using virtchnl;
+ * PF read SA and will return configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v11 2/7] net/iavf: rework tx path
  2021-10-26 10:38 ` [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec " Radu Nicolau
  2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 1/7] common/iavf: " Radu Nicolau
@ 2021-10-26 10:38   ` Radu Nicolau
  2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
                     ` (5 subsequent siblings)
  7 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 10:38 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Rework the TX path and TX descriptor usage in order to
allow for better use of offload flags and to facilitate enabling of
the inline crypto offload feature.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c         | 538 ++++++++++++++++-----------
 drivers/net/iavf/iavf_rxtx.h         | 117 +++++-
 drivers/net/iavf/iavf_rxtx_vec_sse.c |  10 +-
 3 files changed, 431 insertions(+), 234 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index ac4db117f5..dbf71747c0 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1054,27 +1054,31 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 
 static inline void
 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
-			  volatile union iavf_rx_flex_desc *rxdp,
-			  uint8_t rx_flags)
+			  volatile union iavf_rx_flex_desc *rxdp)
 {
-	uint16_t vlan_tci = 0;
-
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
-	    rte_le_to_cpu_64(rxdp->wb.status_error0) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+		(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+		mb->vlan_tci =
+			rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	} else {
+		mb->vlan_tci = 0;
+	}
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
-	    rte_le_to_cpu_16(rxdp->wb.status_error1) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
-#endif
-
-	if (vlan_tci) {
-		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-		mb->vlan_tci = vlan_tci;
+	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
+		mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+				PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+		mb->vlan_tci_outer = mb->vlan_tci;
+		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
+	} else {
+		mb->vlan_tci_outer = 0;
 	}
+#endif
 }
 
 /* Translate the rx descriptor status and error fields to pkt flags */
@@ -1394,7 +1398,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->ol_flags = 0;
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1536,7 +1540,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = 0;
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1774,7 +1778,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
-			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
+			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2068,190 +2072,302 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 	return 0;
 }
 
-/* Check if the context descriptor is needed for TX offloading */
+
+
+static inline void
+iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
+{
+	uint64_t cmd = 0;
+
+	/* TSO enabled */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;
+
+	/* Time Sync - Currently not supported */
+
+	/* Outer L2 TAG 2 Insertion - Currently not supported */
+	/* Inner L2 TAG 2 Insertion - Currently not supported */
+
+	*field |= cmd;
+}
+
+static inline void
+iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
+		const struct rte_mbuf *m)
+{
+	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
+	uint64_t eip_len = 0;
+	uint64_t eip_noinc = 0;
+	/* Default - IP_ID is increment in each segment of LSO */
+
+	switch (m->ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6 |
+			PKT_TX_OUTER_IP_CKSUM)) {
+	case PKT_TX_OUTER_IPV4:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case PKT_TX_OUTER_IPV6:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	}
+
+	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
+		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
+		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
+}
+
 static inline uint16_t
-iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
+iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
+	struct rte_mbuf *m)
 {
-	if (flags & PKT_TX_TCP_SEG)
-		return 1;
-	if (flags & PKT_TX_VLAN_PKT &&
-	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		return 1;
-	return 0;
+	uint64_t segmentation_field = 0;
+	uint64_t total_length = 0;
+
+	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		total_length -= m->outer_l3_len;
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+	if (!m->l4_len || !m->tso_segsz)
+		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
+			 m->l4_len, m->tso_segsz);
+	if (m->tso_segsz < 88)
+		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
+			m->tso_segsz, 88);
+#endif
+	segmentation_field =
+		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
+				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
+		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
+				IAVF_TXD_CTX_QW1_MSS_MASK);
+
+	*field |= segmentation_field;
+
+	return total_length;
 }
 
+
+struct iavf_tx_context_desc_qws {
+	__le64 qw0;
+	__le64 qw1;
+};
+
 static inline void
-iavf_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union iavf_tx_offload tx_offload)
+iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
+	struct rte_mbuf *m, uint16_t *tlen)
 {
+	volatile struct iavf_tx_context_desc_qws *desc_qws =
+			(volatile struct iavf_tx_context_desc_qws *)desc;
+	/* fill descriptor type field */
+	desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
+
+	/* fill command field */
+	iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m);
+
+	/* fill segmentation field */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
+				m);
+	}
+
+	/* fill tunnelling field */
+	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
+	else
+		desc_qws->qw0 = 0;
+
+	desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
+	desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);
+}
+
+
+static inline void
+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
+		struct rte_mbuf *m)
+{
+	uint64_t command = 0;
+	uint64_t offset = 0;
+	uint64_t l2tag1 = 0;
+
+	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
+
+	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
+
+	/* Descriptor based VLAN insertion */
+	if (m->ol_flags & PKT_TX_VLAN_PKT) {
+		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
+		l2tag1 |= m->vlan_tci;
+	}
+
 	/* Set MACLEN */
-	*td_offset |= (tx_offload.l2_len >> 1) <<
-		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & PKT_TX_IP_CKSUM) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV4) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & PKT_TX_IPV6) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	}
-
-	if (ol_flags & PKT_TX_TCP_SEG) {
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
+	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L3 checksum offloading inner */
+	if (m->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4)) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV4) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & PKT_TX_IPV6) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	}
+
+	if (m->ol_flags & PKT_TX_TCP_SEG) {
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (m->l4_len >> 2) <<
 			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		return;
 	}
 
 	/* Enable L4 checksum offloads */
-	switch (ol_flags & PKT_TX_L4_MASK) {
+	switch (m->ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_TCP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_SCTP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case PKT_TX_UDP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	}
+
+	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
+		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
+		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
+		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
+		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
-/* set TSO context descriptor
- * support IP -> L4 and IP -> IP -> L4
- */
-static inline uint64_t
-iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
+static inline void
+iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t value)
 {
-	uint64_t ctx_desc = 0;
-	uint32_t cd_cmd, hdr_len, cd_tso_len;
-
-	if (!tx_offload.l4_len) {
-		PMD_TX_LOG(DEBUG, "L4 length set to 0");
-		return ctx_desc;
+	*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 	}
 
-	hdr_len = tx_offload.l2_len +
-		  tx_offload.l3_len +
-		  tx_offload.l4_len;
+static inline void
+iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
+	struct rte_mbuf *m, uint64_t desc_template,
+	uint16_t tlen, uint16_t ipseclen)
+{
+	uint32_t hdrlen = m->l2_len;
+	uint32_t bufsz = 0;
 
-	cd_cmd = IAVF_TX_CTX_DESC_TSO;
-	cd_tso_len = mbuf->pkt_len - hdr_len;
-	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
-		     ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-		     ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+	/* fill data descriptor qw1 from template */
+	desc->cmd_type_offset_bsz = desc_template;
 
-	return ctx_desc;
-}
+	/* set data buffer address */
+	desc->buffer_addr = rte_mbuf_data_iova(m);
 
-/* Construct the tx flags */
-static inline uint64_t
-iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
-	       uint32_t td_tag)
-{
-	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
-				((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
-				((uint64_t)td_offset <<
-				 IAVF_TXD_QW1_OFFSET_SHIFT) |
-				((uint64_t)size  <<
-				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
-				((uint64_t)td_tag  <<
-				 IAVF_TXD_QW1_L2TAG1_SHIFT));
+	/* calculate data buffer size less set header lengths */
+	if ((m->ol_flags & PKT_TX_TUNNEL_MASK) &&
+			(m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))) {
+		hdrlen += m->outer_l3_len;
+		if (m->ol_flags & PKT_TX_L4_MASK)
+			hdrlen += m->l3_len + m->l4_len;
+		else
+			hdrlen += m->l3_len;
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			hdrlen += ipseclen;
+		bufsz = hdrlen + tlen;
+	} else {
+		bufsz = m->data_len;
+	}
+
+	/* set data buffer size */
+	desc->cmd_type_offset_bsz |=
+		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
+
+	desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
+	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
 }
 
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	volatile struct iavf_tx_desc *txd;
-	volatile struct iavf_tx_desc *txr;
-	struct iavf_tx_queue *txq;
-	struct iavf_tx_entry *sw_ring;
+	struct iavf_tx_queue *txq = tx_queue;
+	volatile struct iavf_tx_desc *txr = txq->tx_ring;
+	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
-	struct rte_mbuf *tx_pkt;
-	struct rte_mbuf *m_seg;
-	uint16_t tx_id;
-	uint16_t nb_tx;
-	uint32_t td_cmd;
-	uint32_t td_offset;
-	uint32_t td_tag;
-	uint64_t ol_flags;
-	uint16_t nb_used;
-	uint16_t nb_ctx;
-	uint16_t tx_last;
-	uint16_t slen;
-	uint64_t buf_dma_addr;
-	uint16_t cd_l2tag2 = 0;
-	union iavf_tx_offload tx_offload = {0};
-
-	txq = tx_queue;
-	sw_ring = txq->sw_ring;
-	txr = txq->tx_ring;
-	tx_id = txq->tx_tail;
-	txe = &sw_ring[tx_id];
+	struct rte_mbuf *mb, *mb_seg;
+	uint16_t desc_idx, desc_idx_last;
+	uint16_t idx;
+
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_free < txq->free_thresh)
-		(void)iavf_xmit_cleanup(txq);
+		iavf_xmit_cleanup(txq);
+
+	desc_idx = txq->tx_tail;
+	txe = &txe_ring[desc_idx];
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+		iavf_dump_tx_entry_ring(txq);
+		iavf_dump_tx_desc_ring(txq);
+#endif
+
 
-	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-		td_cmd = 0;
-		td_tag = 0;
-		td_offset = 0;
+	for (idx = 0; idx < nb_pkts; idx++) {
+		volatile struct iavf_tx_desc *ddesc;
+		uint16_t nb_desc_ctx;
+		uint16_t nb_desc_data, nb_desc_required;
+		uint16_t tlen = 0, ipseclen = 0;
+		uint64_t ddesc_template = 0;
+		uint64_t ddesc_cmd = 0;
+
+		mb = tx_pkts[idx];
 
-		tx_pkt = *tx_pkts++;
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
-		ol_flags = tx_pkt->ol_flags;
-		tx_offload.l2_len = tx_pkt->l2_len;
-		tx_offload.l3_len = tx_pkt->l3_len;
-		tx_offload.l4_len = tx_pkt->l4_len;
-		tx_offload.tso_segsz = tx_pkt->tso_segsz;
-		/* Calculate the number of context descriptors needed. */
-		nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
+		nb_desc_data = mb->nb_segs;
+		nb_desc_ctx = !!(mb->ol_flags &
+			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
 
-		/* The number of descriptors that must be allocated for
+		/**
+		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
-		 * packet plus 1 context descriptor if needed.
+		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
-		tx_last = (uint16_t)(tx_id + nb_used - 1);
+		nb_desc_required = nb_desc_data + nb_desc_ctx;
+
+		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
-		/* Circular ring */
-		if (tx_last >= txq->nb_tx_desc)
-			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+		/* wrap descriptor ring */
+		if (desc_idx_last >= txq->nb_tx_desc)
+			desc_idx_last =
+				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
 
-		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
-			   " tx_first=%u tx_last=%u",
-			   txq->port_id, txq->queue_id, tx_id, tx_last);
+		PMD_TX_LOG(DEBUG,
+			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
+			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
 
-		if (nb_used > txq->nb_free) {
+		if (nb_desc_required > txq->nb_free) {
 			if (iavf_xmit_cleanup(txq)) {
-				if (nb_tx == 0)
+				if (idx == 0)
 					return 0;
 				goto end_of_tx;
 			}
-			if (unlikely(nb_used > txq->rs_thresh)) {
-				while (nb_used > txq->nb_free) {
+			if (unlikely(nb_desc_required > txq->rs_thresh)) {
+				while (nb_desc_required > txq->nb_free) {
 					if (iavf_xmit_cleanup(txq)) {
-						if (nb_tx == 0)
+						if (idx == 0)
 							return 0;
 						goto end_of_tx;
 					}
@@ -2259,122 +2375,94 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 		}
 
-		/* Descriptor based VLAN insertion */
-		if (ol_flags & PKT_TX_VLAN_PKT &&
-		    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
-			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
-			td_tag = tx_pkt->vlan_tci;
-		}
-
-		/* According to datasheet, the bit2 is reserved and must be
-		 * set to 1.
-		 */
-		td_cmd |= 0x04;
-
-		/* Enable checksum offloading */
-		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
-			iavf_txd_enable_checksum(ol_flags, &td_cmd,
-						&td_offset, tx_offload);
+		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
 
-		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
-			uint64_t cd_type_cmd_tso_mss =
-				IAVF_TX_DESC_DTYPE_CONTEXT;
-			volatile struct iavf_tx_context_desc *ctx_txd =
+		if (nb_desc_ctx) {
+			volatile struct iavf_tx_context_desc *ctx_desc =
 				(volatile struct iavf_tx_context_desc *)
-							&txr[tx_id];
+					&txr[desc_idx];
 
 			/* clear QW0 or the previous writeback value
 			 * may impact next write
 			 */
-			*(volatile uint64_t *)ctx_txd = 0;
+			*(volatile uint64_t *)ctx_desc = 0;
 
-			txn = &sw_ring[txe->next_id];
+			txn = &txe_ring[txe->next_id];
 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
 			if (txe->mbuf) {
 				rte_pktmbuf_free_seg(txe->mbuf);
 				txe->mbuf = NULL;
 			}
 
-			/* TSO enabled */
-			if (ol_flags & PKT_TX_TCP_SEG)
-				cd_type_cmd_tso_mss |=
-					iavf_set_tso_ctx(tx_pkt, tx_offload);
+			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
-			if (ol_flags & PKT_TX_VLAN_PKT &&
-			   txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
-				cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
-					<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-				cd_l2tag2 = tx_pkt->vlan_tci;
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
 			}
 
-			ctx_txd->type_cmd_tso_mss =
-				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
-			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
 
-			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
-		}
 
-		m_seg = tx_pkt;
+		mb_seg = mb;
+
 		do {
-			txd = &txr[tx_id];
-			txn = &sw_ring[txe->next_id];
+			ddesc = (volatile struct iavf_tx_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
 			if (txe->mbuf)
 				rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = m_seg;
-
-			/* Setup TX Descriptor */
-			slen = m_seg->data_len;
-			buf_dma_addr = rte_mbuf_data_iova(m_seg);
-			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
-			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
-								  td_offset,
-								  slen,
-								  td_tag);
-
-			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
+
+			txe->mbuf = mb_seg;
+			iavf_fill_data_desc(ddesc, mb_seg,
+					ddesc_template, tlen, ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->next;
-		} while (m_seg);
+			mb_seg = mb_seg->next;
+		} while (mb_seg);
 
 		/* The last packet data descriptor needs End Of Packet (EOP) */
-		td_cmd |= IAVF_TX_DESC_CMD_EOP;
-		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
-		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
+
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
 
 		if (txq->nb_used >= txq->rs_thresh) {
 			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
 				   "%4u (port=%d queue=%d)",
-				   tx_last, txq->port_id, txq->queue_id);
+				   desc_idx_last, txq->port_id, txq->queue_id);
 
-			td_cmd |= IAVF_TX_DESC_CMD_RS;
+			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
 
 			/* Update txq RS bit counters */
 			txq->nb_used = 0;
 		}
 
-		txd->cmd_type_offset_bsz |=
-			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
-					 IAVF_TXD_QW1_CMD_SHIFT);
-		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
+				IAVF_TXD_DATA_QW1_CMD_SHIFT);
+
+		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
 	}
 
 end_of_tx:
 	rte_wmb();
 
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   txq->port_id, txq->queue_id, tx_id, nb_tx);
+		   txq->port_id, txq->queue_id, desc_idx, idx);
 
-	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
-	txq->tx_tail = tx_id;
+	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
+	txq->tx_tail = desc_idx;
 
-	return nb_tx;
+	return idx;
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 2d7f6b1b2d..c4ce9aa99e 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -405,6 +405,112 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+
+#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
+#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
+#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
+#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
+	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
+	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT		(48)
+#define IAVF_TXD_DATA_QW1_L2TAG1_MASK		\
+	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
+	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
+	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT		(30)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK		\
+	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK		\
+	(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT		(50)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK		\
+	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT		(0)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_external_ip_type {
+	IAVF_TX_CTX_DESC_EIPT_NONE,
+	IAVF_TX_CTX_DESC_EIPT_IPV6,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM			(32)
+#define IAVF_TXD_CTX_QW0_L2TAG2_MASK			(0xFFFFUL)
+
+
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)
+
+/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
+#define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
+
+
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
@@ -555,9 +661,10 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	const volatile struct iavf_tx_desc *tx_desc = desc;
 	enum iavf_tx_desc_dtype_value type;
 
-	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
-		tx_desc->cmd_type_offset_bsz &
-		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+
+	type = (enum iavf_tx_desc_dtype_value)
+		rte_le_to_cpu_64(tx_desc->cmd_type_offset_bsz &
+			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
 	switch (type) {
 	case IAVF_TX_DESC_DTYPE_DATA:
 		name = "Tx_data_desc";
@@ -571,8 +678,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	}
 
 	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
-	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
-	       tx_desc->cmd_type_offset_bsz);
+		txq->queue_id, name, tx_id, tx_desc->buffer_addr,
+		tx_desc->cmd_type_offset_bsz);
 }
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 1de43b9b8e..c902142da9 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -363,10 +363,12 @@ static inline void
 flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
 		     const uint32_t *type_table)
 {
-	const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+	const __m128i ptype_mask = _mm_set_epi16(
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
+
 	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
 	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
 	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v11 3/7] net/iavf: add support for asynchronous virt channel messages
  2021-10-26 10:38 ` [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec " Radu Nicolau
  2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 1/7] common/iavf: " Radu Nicolau
  2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 2/7] net/iavf: rework tx path Radu Nicolau
@ 2021-10-26 10:38   ` Radu Nicolau
  2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (4 subsequent siblings)
  7 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 10:38 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for asynchronous virtual channel messages, specifically for
inline IPsec messages.
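
For reference, the scheme below distinguishes synchronous commands, which
expect a single adminq response, from asynchronous (inline IPsec) commands,
which expect two messages: the immediate adminq acknowledgment and the later
asynchronous reply. The waiting command is only completed once both have
arrived. A minimal standalone sketch of that counting scheme (illustrative
only, not driver code; all names are invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pend_cmd_count;

/* issue a command: async commands wait for two messages, sync for one */
static void issue_cmd(bool async)
{
	atomic_store(&pend_cmd_count, async ? 2 : 1);
}

/* handle one incoming message; returns true once the waiter may proceed */
static bool handle_response(void)
{
	return atomic_fetch_sub(&pend_cmd_count, 1) == 1;
}

int main(void)
{
	issue_cmd(true);	/* e.g. an inline IPsec crypto request */
	printf("after adminq ack: %s\n",
	       handle_response() ? "done" : "still waiting");
	printf("after async reply: %s\n",
	       handle_response() ? "done" : "still waiting");
	return 0;
}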

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h       |  16 ++++
 drivers/net/iavf/iavf_vchnl.c | 138 +++++++++++++++++++++-------------
 2 files changed, 101 insertions(+), 53 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 12f541f539..efc90f9072 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -193,6 +193,7 @@ struct iavf_info {
 	uint64_t supported_rxdid;
 	uint8_t *proto_xtr; /* proto xtr type for all queues */
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+	rte_atomic32_t pend_cmd_count;
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
@@ -345,9 +346,24 @@ _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
+	rte_atomic32_set(&vf->pend_cmd_count, 1);
+
 	return !ret;
 }
 
+/* Check there is pending cmd in execution. If none, set new command. */
+static inline int
+_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+	int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+	if (!ret)
+		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+	rte_atomic32_set(&vf->pend_cmd_count, 2);
+
+	return !ret;
+}
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 0f4dd21d44..da4654957a 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -24,8 +24,8 @@
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
-#define MAX_TRY_TIMES 200
-#define ASQ_DELAY_MS  10
+#define MAX_TRY_TIMES 2000
+#define ASQ_DELAY_MS  1
 
 static uint32_t
 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
@@ -143,7 +143,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 }
 
 static int
-iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args,
+	int async)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
@@ -155,8 +156,14 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (vf->vf_reset)
 		return -EIO;
 
-	if (_atomic_set_cmd(vf, args->ops))
-		return -1;
+
+	if (async) {
+		if (_atomic_set_async_response_cmd(vf, args->ops))
+			return -1;
+	} else {
+		if (_atomic_set_cmd(vf, args->ops))
+			return -1;
+	}
 
 	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
@@ -252,9 +259,11 @@ static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
@@ -330,18 +339,40 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
 		case iavf_aqc_opc_send_msg_to_vf:
 			if (msg_opc == VIRTCHNL_OP_EVENT) {
 				iavf_handle_pf_event_msg(dev, info.msg_buf,
-							info.msg_len);
+						info.msg_len);
 			} else {
+				/* check for inline IPsec events */
+				struct inline_ipsec_msg *imsg =
+					(struct inline_ipsec_msg *)info.msg_buf;
+				struct rte_eth_event_ipsec_desc desc;
+				if (msg_opc == VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
+					&& imsg->ipsec_opcode ==
+						INLINE_IPSEC_OP_EVENT) {
+					struct virtchnl_ipsec_event *ev =
+							imsg->ipsec_data.event;
+					desc.subtype =
+						RTE_ETH_EVENT_IPSEC_UNKNOWN;
+					desc.metadata = ev->ipsec_event_data;
+					rte_eth_dev_callback_process(dev,
+							RTE_ETH_EVENT_IPSEC,
+							&desc);
+					return;
+				}
+
 				/* read message and it's expected one */
-				if (msg_opc == vf->pend_cmd)
-					_notify_cmd(vf, msg_ret);
-				else
-					PMD_DRV_LOG(ERR, "command mismatch,"
-						    "expect %u, get %u",
-						    vf->pend_cmd, msg_opc);
+				if (msg_opc == vf->pend_cmd) {
+					rte_atomic32_dec(&vf->pend_cmd_count);
+					if (rte_atomic32_read(
+						&vf->pend_cmd_count) == 0)
+						_notify_cmd(vf, msg_ret);
+				} else {
+					PMD_DRV_LOG(ERR,
+					"command mismatch, expect %u, get %u",
+						vf->pend_cmd, msg_opc);
+				}
 				PMD_DRV_LOG(DEBUG,
-					    "adminq response is received,"
-					    " opcode = %d", msg_opc);
+				"adminq response is received, opcode = %d",
+						msg_opc);
 			}
 			break;
 		default:
@@ -365,7 +396,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_ENABLE_VLAN_STRIPPING");
@@ -386,7 +417,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_DISABLE_VLAN_STRIPPING");
@@ -415,7 +446,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
 		return err;
@@ -468,12 +499,13 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
-		VIRTCHNL_VF_OFFLOAD_QOS;
+		VIRTCHNL_VF_OFFLOAD_QOS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -518,7 +550,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
@@ -562,7 +594,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_strip);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
@@ -602,7 +634,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_insert);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
@@ -645,7 +677,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(vlan_filter);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");
@@ -666,7 +698,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
@@ -697,7 +729,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES");
@@ -725,7 +757,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES");
@@ -758,7 +790,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
@@ -800,7 +832,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
@@ -844,7 +876,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
@@ -890,7 +922,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
@@ -922,7 +954,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
@@ -954,7 +986,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
@@ -1046,7 +1078,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
@@ -1087,7 +1119,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
 
@@ -1128,7 +1160,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
 
@@ -1188,7 +1220,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		args.in_args_size = len;
 		args.out_buffer = vf->aq_resp;
 		args.out_size = IAVF_AQ_BUF_SZ;
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		if (err)
 			PMD_DRV_LOG(ERR, "fail to execute command %s",
 				    add ? "OP_ADD_ETHER_ADDRESS" :
@@ -1215,7 +1247,7 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		*pstats = NULL;
@@ -1250,7 +1282,7 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1290,7 +1322,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
@@ -1317,7 +1349,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
@@ -1344,7 +1376,7 @@ iavf_fdir_add(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
 		return err;
@@ -1404,7 +1436,7 @@ iavf_fdir_del(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
 		return err;
@@ -1451,7 +1483,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
 		return err;
@@ -1492,7 +1524,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of %s",
@@ -1515,7 +1547,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
@@ -1541,7 +1573,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_SET_RSS_HENA");
@@ -1562,7 +1594,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1595,7 +1627,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_TC_MAP");
@@ -1640,7 +1672,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 		i * sizeof(struct virtchnl_ether_addr);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
@@ -1686,11 +1718,11 @@ iavf_request_queues(struct rte_eth_dev *dev, uint16_t num)
 		 * before iavf_read_msg_from_pf.
 		 */
 		rte_intr_disable(&pci_dev->intr_handle);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_intr_enable(&pci_dev->intr_handle);
 	} else {
 		rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
 				  iavf_dev_alarm_handler, dev);
 	}
@@ -1729,7 +1761,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
 		return err;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v11 4/7] net/iavf: add iAVF IPsec inline crypto support
  2021-10-26 10:38 ` [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (2 preceding siblings ...)
  2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
@ 2021-10-26 10:38   ` Radu Nicolau
  2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
                     ` (3 subsequent siblings)
  7 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 10:38 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Ray Kinsella
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload for
ESP over UDP, in conjunction with TSO for UDP and TCP flows.
Implement support for rte_security packet metadata.

Add definitions for the IPsec descriptors, and extend the offload
support in the data and context descriptors accordingly.
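
The IV length handling introduced in iavf_ipsec_crypto.c packs both the
2-bit hardware IV length code and the raw IV size into a single byte, so
the Tx path can recover the size when computing TSO header lengths. A
standalone restatement of that encoding (illustrative only; the names are
invented, not the driver's):

#include <stdint.h>
#include <stdio.h>

enum { IV_LEN_NONE = 0, IV_LEN_DW = 1, IV_LEN_DDW = 2, IV_LEN_QDW = 3 };

/* low 2 bits: hardware IV length code, upper 6 bits: raw IV size in bytes */
static uint8_t encode_iv_len(uint16_t iv_sz)
{
	uint8_t code = IV_LEN_NONE;

	switch (iv_sz) {
	case 4:
		code = IV_LEN_DW;
		break;
	case 8:
		code = IV_LEN_DDW;
		break;
	case 16:
		code = IV_LEN_QDW;
		break;
	}

	return (uint8_t)((iv_sz << 2) | code);
}

int main(void)
{
	uint8_t field = encode_iv_len(16);

	printf("field=0x%02x hw code=%u iv size=%u bytes\n",
	       field, field & 0x3, field >> 2);
	return 0;
}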

Add support to the virtual channel mailbox for IPsec Crypto request
operations. IPsec Crypto requests receive an initial acknowledgment
from the physical function driver confirming receipt of the request,
followed by an asynchronous response carrying the success/failure of
the request and any response data.

Add enhanced descriptor debugging

Refactor the scalar Tx burst function to support integration of the offload
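
As background for the Tx burst refactor, the first data descriptor of a
tunnelled TSO packet reports only the header lengths plus the TSO payload
length, with the remaining data carried by subsequent descriptors. A
simplified standalone sketch of that buffer size computation (illustrative
only; the structure and function names are invented):

#include <stdint.h>
#include <stdio.h>

struct pkt_lens {
	uint32_t l2_len, outer_l3_len, l3_len, l4_len, ipsec_len, tso_payload;
};

/* size reported in the first data descriptor of a tunnelled TSO packet */
static uint32_t first_data_buf_sz(const struct pkt_lens *p,
				  int has_l4, int has_ipsec)
{
	uint32_t hdrlen = p->l2_len + p->outer_l3_len + p->l3_len;

	if (has_l4)
		hdrlen += p->l4_len;
	if (has_ipsec)
		hdrlen += p->ipsec_len;

	return hdrlen + p->tso_payload;
}

int main(void)
{
	struct pkt_lens p = {
		.l2_len = 14, .outer_l3_len = 20, .l3_len = 20,
		.l4_len = 20, .ipsec_len = 0, .tso_payload = 2960,
	};

	printf("first buffer size = %u bytes\n",
	       first_data_buf_sz(&p, 1, 0));
	return 0;
}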

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h                       |   10 +
 drivers/net/iavf/iavf_ethdev.c                |   41 +-
 drivers/net/iavf/iavf_generic_flow.c          |   15 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1894 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  202 +-
 drivers/net/iavf/iavf_rxtx.h                  |  115 +-
 drivers/net/iavf/iavf_vchnl.c                 |   29 +
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 13 files changed, 2826 insertions(+), 32 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index efc90f9072..6df31a649e 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -221,6 +221,7 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+	struct iavf_parser_list ipsec_crypto_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
@@ -245,6 +246,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_IPV6_FLOW,
 	IAVF_PROTO_XTR_TCP,
 	IAVF_PROTO_XTR_IP_OFFSET,
+	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
 	IAVF_PROTO_XTR_MAX,
 };
 
@@ -256,11 +258,14 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 };
 
+struct iavf_security_ctx;
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
 	struct rte_eth_dev_data *dev_data;
 	struct iavf_info vf;
+	struct iavf_security_ctx *security_ctx;
 
 	bool rx_bulk_alloc_allowed;
 	/* For vector PMD */
@@ -279,6 +284,8 @@ struct iavf_adapter {
 	(&((struct iavf_adapter *)adapter)->vf)
 #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct iavf_adapter *)adapter)->hw)
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+	(((struct iavf_adapter *)adapter)->security_ctx)
 
 /* IAVF_VSI_TO */
 #define IAVF_VSI_TO_HW(vsi) \
@@ -421,5 +428,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			uint16_t size);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index b2b413c247..9ab42b6452 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -30,6 +30,7 @@
 #include "iavf_rxtx.h"
 #include "iavf_generic_flow.h"
 #include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
@@ -71,6 +72,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
 	[IAVF_PROTO_XTR_IP_OFFSET] = {
 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
 		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+		.param = {
+		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+		.ol_flag =
+			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
 };
 
 static int iavf_dev_configure(struct rte_eth_dev *dev);
@@ -924,6 +930,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	/* free iAVF security device context all related resources */
+	iavf_security_ctx_destroy(adapter);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -933,7 +942,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
@@ -975,6 +986,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+	}
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -1718,6 +1734,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
 		{ "tcp",       IAVF_PROTO_XTR_TCP       },
 		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
 	};
 	uint32_t i;
 
@@ -1726,8 +1743,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 			return xtr_type_map[i].type;
 	}
 
-	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
-		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+			"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
 
 	return -1;
 }
@@ -2375,6 +2392,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		goto flow_init_err;
 	}
 
+	/** Check if the IPsec Crypto offload is supported and create
+	 *  security_ctx if it is.
+	 */
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		/* Initialize security_ctx only for primary process*/
+		ret = iavf_security_ctx_create(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+			return ret;
+		}
+
+		ret = iavf_security_init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
+			return ret;
+		}
+	}
+
 	iavf_default_rss_disable(adapter);
 
 	return 0;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index 364904fa02..2befa125ac 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1766,6 +1766,7 @@ iavf_flow_init(struct iavf_adapter *ad)
 	TAILQ_INIT(&vf->flow_list);
 	TAILQ_INIT(&vf->rss_parser_list);
 	TAILQ_INIT(&vf->dist_parser_list);
+	TAILQ_INIT(&vf->ipsec_crypto_parser_list);
 	rte_spinlock_init(&vf->flow_ops_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
@@ -1840,6 +1841,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
+		list = &vf->ipsec_crypto_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
@@ -2149,6 +2153,13 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 
 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
 				    actions, error);
+	if (*engine)
+		return 0;
+
+	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
+			pattern, actions, error);
+	if (*engine)
+		return 0;
 
 	if (!*engine) {
 		rte_flow_error_set(error, EINVAL,
@@ -2195,6 +2206,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	/* Special case for inline crypto egress flows */
+	if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY)
+		goto free_flow;
+
 	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
 			&engine, iavf_parse_engine_create, error);
 	if (ret < 0) {
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index f2b54e1944..3681a96b31 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -464,6 +464,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 /* engine types. */
 enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_NONE = 0,
+	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
 	IAVF_FLOW_ENGINE_MAX,
@@ -477,6 +478,7 @@ enum iavf_flow_engine_type {
  */
 enum iavf_flow_classification_stage {
 	IAVF_FLOW_STAGE_NONE = 0,
+	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
 	IAVF_FLOW_STAGE_RSS,
 	IAVF_FLOW_STAGE_DISTRIBUTOR,
 	IAVF_FLOW_STAGE_MAX,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
new file mode 100644
index 0000000000..b697e62579
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -0,0 +1,1894 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+#include <rte_security_driver.h>
+#include <rte_security.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_log.h"
+#include "iavf_generic_flow.h"
+
+#include "iavf_ipsec_crypto.h"
+#include "iavf_ipsec_crypto_capabilities.h"
+
+/**
+ * iAVF IPsec Crypto Security Context
+ */
+struct iavf_security_ctx {
+	struct iavf_adapter *adapter;
+	int pkt_md_offset;
+	struct rte_cryptodev_capabilities *crypto_capabilities;
+};
+
+/**
+ * iAVF IPsec Crypto Security Session Parameters
+ */
+struct iavf_security_session {
+	struct iavf_adapter *adapter;
+
+	enum rte_security_ipsec_sa_mode mode;
+	enum rte_security_ipsec_tunnel_type type;
+	enum rte_security_ipsec_sa_direction direction;
+
+	struct {
+		uint32_t spi; /* Security Parameter Index */
+		uint32_t hw_idx; /* SA Index in hardware table */
+	} sa;
+
+	struct {
+		uint8_t enabled :1;
+		union {
+			uint64_t value;
+			struct {
+				uint32_t hi;
+				uint32_t low;
+			};
+		};
+	} esn;
+
+	struct {
+		uint8_t enabled :1;
+	} udp_encap;
+
+	size_t iv_sz;
+	size_t icv_sz;
+	size_t block_sz;
+
+	struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
+};
+/**
+ *  IV Length field in IPsec Tx Desc uses the following encoding:
+ *
+ *  0B - 0
+ *  4B - 1
+ *  8B - 2
+ *  16B - 3
+ *
+ * but we also need the IV Length for TSO to correctly calculate the total
+ * header length, so it is placed in the upper 6 bits here for easier retrieval.
+ */
+static inline uint8_t
+calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
+{
+	uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
+
+	switch (iv_sz) {
+	case 4:
+		iv_length = IAVF_IPSEC_IV_LEN_DW;
+		break;
+	case 8:
+		iv_length = IAVF_IPSEC_IV_LEN_DDW;
+		break;
+	case 16:
+		iv_length = IAVF_IPSEC_IV_LEN_QDW;
+		break;
+	}
+
+	return (iv_sz << 2) | iv_length;
+}
+
+static unsigned int
+iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
+{
+	return sizeof(struct iavf_security_session);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_capability(struct iavf_security_ctx *iavf_sctx,
+	uint32_t algo, uint32_t type)
+{
+	const struct rte_cryptodev_capabilities *capability;
+	int i = 0;
+
+	capability = &iavf_sctx->crypto_capabilities[i];
+
+	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+			capability->sym.xform_type == type &&
+			capability->sym.cipher.algo == algo)
+			return &capability->sym;
+		/** try next capability */
+		capability = &iavf_crypto_capabilities[i++];
+	}
+
+	return NULL;
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_auth_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
+}
+static const struct rte_cryptodev_symmetric_capability *
+get_aead_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
+}
+
+static uint16_t
+get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->auth.block_size;
+}
+
+static uint8_t
+calc_context_desc_cipherblock_sz(size_t len)
+{
+	switch (len) {
+	case 8:
+		return 0x2;
+	case 16:
+		return 0x3;
+	default:
+		return 0x0;
+	}
+}
+
+static int
+valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
+{
+	if (len < min || len > max)
+		return false;
+
+	if (increment == 0)
+		return true;
+
+	if ((len - min) % increment)
+		return false;
+
+	/* make sure it fits in the key array */
+	if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
+		return false;
+
+	return true;
+}
+
+static int
+valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_auth_xform *auth)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, auth->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(auth->key.length,
+		capability->auth.key_size.min,
+		capability->auth.key_size.max,
+		capability->auth.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_cipher_xform *cipher)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, cipher->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(cipher->key.length,
+		capability->cipher.key_size.min,
+		capability->cipher.key_size.max,
+		capability->cipher.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_aead_xform *aead)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, aead->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(aead->key.length,
+		capability->aead.key_size.min,
+		capability->aead.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
+	struct rte_security_session_conf *conf)
+{
+	/** validate security action/protocol selection */
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
+		PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate IPsec protocol selection */
+	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate selected options */
+	if (conf->ipsec.options.copy_dscp ||
+		conf->ipsec.options.copy_flabel ||
+		conf->ipsec.options.copy_df ||
+		conf->ipsec.options.dec_ttl ||
+		conf->ipsec.options.ecn ||
+		conf->ipsec.options.stats) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+		return -EINVAL;
+	}
+
+	/**
+	 * Validate crypto xforms parameters.
+	 *
+	 * AEAD transforms can be used for either inbound or outbound IPsec
+	 * SAs. For non-AEAD transforms we explicitly support only CIPHER/AUTH
+	 * chains for outbound and AUTH/CIPHER chains for inbound IPsec.
+	 */
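+	/*
+	 * For example (illustrative only): an outbound AES-CBC + HMAC-SHA256
+	 * SA would be described as a CIPHER xform whose ->next points at the
+	 * AUTH xform, while the inbound direction expects the reverse
+	 * AUTH -> CIPHER ordering.
+	 */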
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
+			PMD_DRV_LOG(ERR, "Invalid AEAD parameters specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid cipher parameters specified");
+			return -EINVAL;
+		}
+
+		if (!valid_auth_xform(iavf_sctx,
+				&conf->crypto_xform->next->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid auth parameters specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid auth parameters specified");
+			return -EINVAL;
+		}
+
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->next->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid cipher parameters specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_aead_xform *aead, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AEAD;
+
+	switch (aead->algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		cfg->algo_type = VIRTCHNL_AES_CCM; break;
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		cfg->algo_type = VIRTCHNL_AES_GCM; break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
+		break;
+	}
+
+	cfg->key_len = aead->key.length;
+	cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt len */
+	cfg->digest_len = aead->digest_length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, aead->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_cipher_xform *cipher, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_CIPHER;
+
+	switch (cipher->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cfg->algo_type = VIRTCHNL_AES_CBC; break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cfg->algo_type = VIRTCHNL_3DES_CBC; break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cfg->algo_type = VIRTCHNL_AES_CTR;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid cipher parameters");
+		break;
+	}
+
+	cfg->key_len = cipher->key.length;
+	cfg->iv_len = cipher->iv.length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_auth_xform *auth, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AUTH;
+
+	switch (auth->algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+		cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		cfg->algo_type = VIRTCHNL_AES_CMAC; break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		cfg->algo_type = VIRTCHNL_AES_GMAC;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid auth parameters");
+		break;
+	}
+
+	cfg->key_len = auth->key.length;
+	/* special case for RTE_CRYPTO_AUTH_AES_GMAC */
+	if (auth->algo == RTE_CRYPTO_AUTH_AES_GMAC)
+		cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt */
+	else
+		cfg->iv_len = auth->iv.length;
+	cfg->digest_len = auth->digest_length;
+
+	memcpy(cfg->key_data, auth->key.data, cfg->key_len);
+}
+
+/**
+ * Send SA add virtual channel request to Inline IPsec driver.
+ *
+ * Inline IPsec driver expects SPI and destination IP address to be in host
+ * byte order, but DPDK APIs use network byte order, therefore we need to do
+ * an htonl conversion of these parameters.
+ */
+static uint32_t
+iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
+	struct rte_security_session_conf *conf)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	struct virtchnl_ipsec_sa_cfg *sa_cfg;
+	size_t request_len, response_len;
+
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg);
+
+	request = rte_malloc("iavf-sad-add-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
+	response = rte_malloc("iavf-sad-add-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set SA configuration params */
+	sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
+
+	sa_cfg->spi = conf->ipsec.spi;
+	sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
+	sa_cfg->virtchnl_direction =
+		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+			VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
+
+	if (conf->ipsec.options.esn) {
+		sa_cfg->esn_enabled = 1;
+		sa_cfg->esn_hi = conf->ipsec.esn.hi;
+		sa_cfg->esn_low = conf->ipsec.esn.low;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sa_cfg->udp_encap_enabled = 1;
+
+	/* Set outer IP params */
+	if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
+
+		*((uint32_t *)sa_cfg->dst_addr)	=
+			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
+	} else {
+		uint32_t *v6_dst_addr =
+			conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
+
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
+
+		((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
+		((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
+		((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
+		((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
+	}
+
+	/* set crypto params */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->aead, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->cipher, conf->ipsec.salt);
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->auth, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->auth, conf->ipsec.salt);
+		if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
+			sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->cipher, conf->ipsec.salt);
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sa_cfg_resp->sa_handle;
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static void
+set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
+	struct iavf_security_session *sess)
+{
+	template->sa_idx = sess->sa.hw_idx;
+
+	template->ol_flags = 0;
+
+	if (sess->udp_encap.enabled)
+		template->ol_flags |= IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
+
+	if (sess->esn.enabled)
+		template->ol_flags |= IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
+
+	template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
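+	/*
+	 * ctx_desc_ipsec_params (sketch of the packing done below): cipher
+	 * block size code in the low bits, ICV length in 4B words above it;
+	 * e.g. a 16B block with a 16B ICV gives 0x3 | (4 << 3) = 0x23.
+	 */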
+	template->ctx_desc_ipsec_params =
+			calc_context_desc_cipherblock_sz(sess->block_sz) |
+			((uint8_t)(sess->icv_sz >> 2) << 3);
+}
+
+static void
+set_session_parameter(struct iavf_security_ctx *iavf_sctx,
+	struct iavf_security_session *sess,
+	struct rte_security_session_conf *conf, uint32_t sa_idx)
+{
+	sess->adapter = iavf_sctx->adapter;
+
+	sess->mode = conf->ipsec.mode;
+	sess->direction = conf->ipsec.direction;
+
+	if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		sess->type = conf->ipsec.tunnel.type;
+
+	sess->sa.spi = conf->ipsec.spi;
+	sess->sa.hw_idx = sa_idx;
+
+	if (conf->ipsec.options.esn) {
+		sess->esn.enabled = 1;
+		sess->esn.value = conf->ipsec.esn.value;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sess->udp_encap.enabled = 1;
+
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sess->block_sz = get_aead_blocksize(iavf_sctx,
+			conf->crypto_xform->aead.algo);
+		sess->iv_sz = sizeof(uint64_t); /* iv.length includes salt */
+		sess->icv_sz = conf->crypto_xform->aead.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sess->block_sz = get_cipher_blocksize(iavf_sctx,
+			conf->crypto_xform->cipher.algo);
+		sess->iv_sz = conf->crypto_xform->cipher.iv.length;
+		sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			sess->block_sz = get_auth_blocksize(iavf_sctx,
+				RTE_CRYPTO_AUTH_AES_GMAC);
+			sess->iv_sz = conf->crypto_xform->auth.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		} else {
+			sess->block_sz = get_cipher_blocksize(iavf_sctx,
+				conf->crypto_xform->next->cipher.algo);
+			sess->iv_sz =
+				conf->crypto_xform->next->cipher.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		}
+	}
+
+	set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
+}
+
+/**
+ * Create IPsec Security Association for inline IPsec Crypto offload.
+ *
+ * 1. validate session configuration parameters
+ * 2. allocate session memory from mempool
+ * 3. add SA to hardware database
+ * 4. set session parameters
+ * 5. create packet metadata template for datapath
+ */
+static int
+iavf_ipsec_crypto_session_create(void *device,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *session,
+				 struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_session = NULL;
+	int sa_idx;
+	int ret = 0;
+
+	/* validate that all SA parameters are valid for device */
+	ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
+	if (ret)
+		return ret;
+
+	/* allocate session context */
+	if (rte_mempool_get(mempool, (void **)&iavf_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
+		return -ENOMEM;
+	}
+
+	/* add SA to hardware database */
+	sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
+	if (sa_idx < 0) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add SA (spi: %d, mode: %s, direction: %s)",
+			conf->ipsec.spi,
+			conf->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
+				"transport" : "tunnel",
+			conf->ipsec.direction ==
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+				"inbound" : "outbound");
+
+		rte_mempool_put(mempool, iavf_session);
+		return -EFAULT;
+	}
+
+	/* save data plane required session parameters */
+	set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
+
+	/* save to security session private data */
+	set_sec_session_private_data(session, iavf_session);
+
+	return 0;
+}
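+/*
+ * Typical application flow (illustrative sketch, not part of the driver):
+ *	struct rte_security_ctx *sctx = rte_eth_dev_get_sec_ctx(port_id);
+ *	struct rte_security_session *sess =
+ *		rte_security_session_create(sctx, &conf, sess_mp, priv_mp);
+ * which reaches this callback through iavf_ipsec_crypto_ops.session_create.
+ */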
+
+/**
+ * Check if valid ipsec crypto action.
+ * SPI must be non-zero and SPI in session must match SPI value
+ * passed into function.
+ *
+ * returns: 0 if invalid session or SPI value equals zero
+ * returns: 1 if valid
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_session *sess = session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(sess == NULL || sess->adapter != adapter))
+		return false;
+
+	/* SPI value must be non-zero */
+	if (spi == 0)
+		return false;
+	/* Session SPI must match flow SPI */
+	else if (sess->sa.spi == spi) {
+		return true;
+		/**
+		 * TODO: We should add a way of tracking valid hw SA indices to
+		 * make validation less brittle
+		 */
+	}
+
+	return true;
+}
+
+/**
+ * Send virtual channel security policy add request to IES driver.
+ *
+ * IES driver expects SPI and destination IP address to be in host
+ * byte order, but DPDK APIs use network byte order, therefore we need to do
+ * an htonl conversion of these parameters.
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg);
+	request = rte_malloc("iavf-inbound-security-policy-add-request",
+				request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* ESP SPI */
+	request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
+
+	/* Destination IP  */
+	if (is_v4) {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
+		request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
+	} else {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+		request->ipsec_data.sp_cfg->dip[0] =
+				htonl(((uint32_t *)v6_dst_addr)[0]);
+		request->ipsec_data.sp_cfg->dip[1] =
+				htonl(((uint32_t *)v6_dst_addr)[1]);
+		request->ipsec_data.sp_cfg->dip[2] =
+				htonl(((uint32_t *)v6_dst_addr)[2]);
+		request->ipsec_data.sp_cfg->dip[3] =
+				htonl(((uint32_t *)v6_dst_addr)[3]);
+	}
+
+	request->ipsec_data.sp_cfg->drop = drop;
+
+	/** Traffic Class/Congestion Domain currently not supported */
+	request->ipsec_data.sp_cfg->set_tc = 0;
+	request->ipsec_data.sp_cfg->cgd = 0;
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
+	response = rte_malloc("iavf-inbound-security-policy-add-response",
+				response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* send virtual channel request to add security policy to hardware */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sp_cfg_resp->rule_id;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_update);
+	request = rte_malloc("iavf-sa-update-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sa-update-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set request params */
+	request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
+	request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
+
+	/* send virtual channel request to update SA in hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_resp->resp;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_update(void *device,
+		struct rte_security_session *session,
+		struct rte_security_session_conf *conf)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int rc = 0;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* update esn hi 32-bits */
+	if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
+		/**
+		 * Update ESN in hardware for inbound SA. Store in
+		 * iavf_security_session for outbound SA for use
+		 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
+		 */
+		if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+			rc = iavf_ipsec_crypto_sa_update_esn(adapter,
+					iavf_sess);
+		else
+			iavf_sess->esn.hi = conf->ipsec.esn.hi;
+	}
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
+		struct rte_security_session *session __rte_unused,
+		struct rte_security_stats *stats __rte_unused)
+{
+	return -EOPNOTSUPP;
+}
+
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_destroy);
+	request = rte_malloc("iavf-sp-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sp-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set security policy params */
+	request->ipsec_data.sp_destroy->table_id = is_v4 ?
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+	request->ipsec_data.sp_destroy->rule_id = flow_id;
+
+	/* send virtual channel request to delete the security policy */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_status->status;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_destroy);
+
+	request = rte_malloc("iavf-sa-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+
+	response = rte_malloc("iavf-sa-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/**
+	 * SA delete supports deletion of 1-8 specified SAs, or, if the flag
+	 * field is zero, all SAs associated with the VF will be deleted.
+	 */
+	if (sess) {
+		request->ipsec_data.sa_destroy->flag = 0x1;
+		request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
+	} else {
+		request->ipsec_data.sa_destroy->flag = 0x0;
+	}
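+	/*
+	 * E.g. (sketch): deleting two specific SAs would presumably set
+	 * flag = 0x3 and fill sa_index[0] and sa_index[1]; the response
+	 * status echoes the same bitmask on success (checked below).
+	 */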
+
+	/* send virtual channel request to delete SA from hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+
+	/**
+	 * Delete status will be the same bitmask as the sa_destroy request
+	 * flag if the delete was successful
+	 */
+	if (request->ipsec_data.sa_destroy->flag !=
+			response->ipsec_data.ipsec_status->status)
+		rc = -EFAULT;
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_destroy(void *device,
+		struct rte_security_session *session)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int ret;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
+	rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
+	return ret;
+}
+
+/**
+ * Get ESP trailer from packet as well as calculate the total ESP trailer
+ * length, which include padding, ESP trailer footer and the ICV
+ */
+static inline struct rte_esp_tail *
+iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t *esp_trailer_length)
+{
+	struct rte_esp_tail *esp_trailer;
+
+	uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
+	uint16_t offset = 0;
+
+	/**
+	 * The ICV will not be present in TSO packets as this is appended by
+	 * hardware during segment generation
+	 */
+	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+		length -= s->icv_sz;
+
+	*esp_trailer_length = length;
+
+	/**
+	 * Calculate offset in packet to ESP trailer header, this should be
+	 * total packet length less the size of the ESP trailer plus the ICV
+	 * length if it is present
+	 */
+	offset = rte_pktmbuf_pkt_len(m) - length;
+
+	if (m->nb_segs > 1) {
+		/* find segment which esp trailer is located */
+		while (m->data_len < offset) {
+			offset -= m->data_len;
+			m = m->next;
+		}
+	}
+
+	esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
+
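+	/*
+	 * Worked example (sketch): with a 16B ICV and a 4B pad the reported
+	 * trailer length for a non-TSO packet would be
+	 * sizeof(struct rte_esp_tail) (2) + 16 + 4 = 22B.
+	 */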
+	*esp_trailer_length += esp_trailer->pad_len;
+
+	return esp_trailer;
+}
+
+static inline uint16_t
+iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t esp_tlen)
+{
+	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
+	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
+	uint16_t ol4_len = 0;		/* UDP NATT */
+	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
+	uint16_t l4_len = 0;		/* TCP/UDP/SCTP hdrs */
+	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
+
+	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ol3_len = m->outer_l3_len;
+		/**<
+		 * application provided l3len assumed to include length of
+		 * ipv4/6 hdr + ext hdrs
+		 */
+
+	if (s->udp_encap.enabled)
+		ol4_len = sizeof(struct rte_udp_hdr);
+
+	l3_len = m->l3_len;
+	l4_len = m->l4_len;
+
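+	/*
+	 * Worked example (sketch): a 1000B tunnel-mode packet with 14B MAC,
+	 * 20B outer IPv4, 8B ESP header + 8B IV, 20B inner IPv4, 20B TCP and
+	 * a 22B ESP trailer leaves 1000 - 112 = 888B of L4 payload.
+	 */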
+	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
+			esp_hlen + l3_len + l4_len + esp_tlen);
+}
+
+static int
+iavf_ipsec_crypto_pkt_metadata_set(void *device,
+			 struct rte_security_session *session,
+			 struct rte_mbuf *m, void *params)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_sess = session->sess_private_data;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
+	struct rte_esp_tail *esp_tail;
+	uint64_t *sqn = params;
+	uint16_t esp_trailer_length;
+
+	/* Check we have a valid session associated with this device */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* Get dynamic metadata location from mbuf */
+	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
+		struct iavf_ipsec_crypto_pkt_metadata *);
+
+	/* Set immutable metadata values from session template */
+	memcpy(md, &iavf_sess->pkt_metadata_template,
+		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
+
+	esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
+			&esp_trailer_length);
+
+	/* Set per packet mutable metadata values */
+	md->esp_trailer_len = esp_trailer_length;
+	md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
+				iavf_sess, esp_trailer_length);
+	md->next_proto = esp_tail->next_proto;
+
+	/* If Extended SN in use set the upper 32-bits in metadata */
+	if (iavf_sess->esn.enabled && sqn != NULL)
+		md->esn = (uint32_t)(*sqn >> 32);
+
+	return 0;
+}
+
+static int
+iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
+		struct virtchnl_ipsec_cap *capability)
+{
+	/* Perform pf-vf comms */
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg);
+
+	request = rte_malloc("iavf-device-capability-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_cap);
+	response = rte_malloc("iavf-device-capability-response",
+			response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* send virtual channel request to get device capabilities */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id) {
+		rc = -EFAULT;
+		goto update_cleanup;
+	}
+	memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+enum rte_crypto_auth_algorithm auth_maptbl[] = {
+	/* Hash Algorithm */
+	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
+	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
+	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
+	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
+	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
+	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
+	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
+	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
+	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
+	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+};
+
+static void
+update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
+		struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	capability->auth.algo = auth_maptbl[acap->algo_type];
+	capability->auth.block_size = acap->block_size;
+
+	capability->auth.key_size.min = acap->min_key_size;
+	capability->auth.key_size.max = acap->max_key_size;
+	capability->auth.key_size.increment = acap->inc_key_size;
+
+	capability->auth.digest_size.min = acap->min_digest_size;
+	capability->auth.digest_size.max = acap->max_digest_size;
+	capability->auth.digest_size.increment = acap->inc_digest_size;
+}
+
+enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
+	/* Cipher Algorithm */
+	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
+	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
+	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
+	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
+};
+
+static void
+update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+	capability->cipher.algo = cipher_maptbl[acap->algo_type];
+
+	capability->cipher.block_size = acap->block_size;
+
+	capability->cipher.key_size.min = acap->min_key_size;
+	capability->cipher.key_size.max = acap->max_key_size;
+	capability->cipher.key_size.increment = acap->inc_key_size;
+
+	capability->cipher.iv_size.min = acap->min_iv_size;
+	capability->cipher.iv_size.max = acap->max_iv_size;
+	capability->cipher.iv_size.increment = acap->inc_iv_size;
+}
+
+enum rte_crypto_aead_algorithm aead_maptbl[] = {
+	/* AEAD Algorithm */
+	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
+	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
+	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+};
+
+static void
+update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	capability->aead.algo = aead_maptbl[acap->algo_type];
+
+	capability->aead.block_size = acap->block_size;
+
+	capability->aead.key_size.min = acap->min_key_size;
+	capability->aead.key_size.max = acap->max_key_size;
+	capability->aead.key_size.increment = acap->inc_key_size;
+
+	capability->aead.aad_size.min = acap->min_aad_size;
+	capability->aead.aad_size.max = acap->max_aad_size;
+	capability->aead.aad_size.increment = acap->inc_aad_size;
+
+	capability->aead.iv_size.min = acap->min_iv_size;
+	capability->aead.iv_size.max = acap->max_iv_size;
+	capability->aead.iv_size.increment = acap->inc_iv_size;
+
+	capability->aead.digest_size.min = acap->min_digest_size;
+	capability->aead.digest_size.max = acap->max_digest_size;
+	capability->aead.digest_size.increment = acap->inc_digest_size;
+}
+
+/**
+ * Dynamically set crypto capabilities based on virtchannel IPsec
+ * capabilities structure.
+ */
+int
+iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
+{
+	struct rte_cryptodev_capabilities *capabilities;
+	int i, j, number_of_capabilities = 0, ci = 0;
+
+	/* Count the total number of crypto algorithms supported */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
+		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
+
+	/**
+	 * Allocate cryptodev capabilities structure for
+	 * *number_of_capabilities* items plus one item to null terminate the
+	 * array
+	 */
+	capabilities = rte_zmalloc("crypto_cap",
+		sizeof(struct rte_cryptodev_capabilities) *
+		(number_of_capabilities + 1), 0);
+	if (capabilities == NULL)
+		return -ENOMEM;
+
+	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
+
+	/**
+	 * Iterate over each virtchl crypto capability by crypto type and
+	 * algorithm.
+	 */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
+		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
+			switch (vch_cap->cap[i].crypto_type) {
+			case VIRTCHNL_AUTH:
+				update_auth_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_CIPHER:
+				update_cipher_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_AEAD:
+				update_aead_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			default:
+				capabilities[ci].op =
+						RTE_CRYPTO_OP_TYPE_UNDEFINED;
+				break;
+			}
+		}
+	}
+
+	iavf_sctx->crypto_capabilities = capabilities;
+	return 0;
+}
+
+/**
+ * Get security capabilities for device
+ */
+static const struct rte_security_capability *
+iavf_ipsec_crypto_capabilities_get(void *device)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	unsigned int i;
+
+	static struct rte_security_capability iavf_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 }
+			},
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	/**
+	 * Update the security capabilities struct with the runtime discovered
+	 * crypto capabilities, except for the last element of the array which
+	 * is the null termination
+	 */
+	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
+			sizeof(iavf_security_capabilities[0])) - 1); i++) {
+		iavf_security_capabilities[i].crypto_capabilities =
+			iavf_sctx->crypto_capabilities;
+	}
+
+	return iavf_security_capabilities;
+}
+
+static struct rte_security_ops iavf_ipsec_crypto_ops = {
+	.session_get_size		= iavf_ipsec_crypto_session_size_get,
+	.session_create			= iavf_ipsec_crypto_session_create,
+	.session_update			= iavf_ipsec_crypto_session_update,
+	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
+	.session_destroy		= iavf_ipsec_crypto_session_destroy,
+	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
+	.get_userdata			= NULL,
+	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
+};
+
+int
+iavf_security_ctx_create(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx;
+
+	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
+	if (sctx == NULL)
+		return -ENOMEM;
+
+	sctx->device = adapter->vf.eth_dev;
+	sctx->ops = &iavf_ipsec_crypto_ops;
+	sctx->sess_cnt = 0;
+
+	adapter->vf.eth_dev->security_ctx = sctx;
+
+	if (adapter->security_ctx == NULL) {
+		adapter->security_ctx = rte_malloc("iavf_security_ctx",
+				sizeof(struct iavf_security_ctx), 0);
+		if (adapter->security_ctx == NULL)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int
+iavf_security_init(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct rte_mbuf_dynfield pkt_md_dynfield = {
+		.name = "iavf_ipsec_crypto_pkt_metadata",
+		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
+		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
+	};
+	struct virtchnl_ipsec_cap capabilities;
+	int rc;
+
+	iavf_sctx->adapter = adapter;
+
+	iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
+	if (iavf_sctx->pkt_md_offset < 0)
+		return iavf_sctx->pkt_md_offset;
+
+	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
+	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
+	if (rc)
+		return rc;
+
+	return	iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
+			&capabilities);
+}
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	return iavf_sctx->pkt_md_offset;
+}
+
+int
+iavf_security_ctx_destroy(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx  = adapter->vf.eth_dev->security_ctx;
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	if (iavf_sctx == NULL)
+		return -ENODEV;
+
+	/* TODO: Add resources cleanup */
+
+	/* free and reset security data structures */
+	rte_free(iavf_sctx);
+	rte_free(sctx);
+
+	adapter->security_ctx = NULL;
+	adapter->vf.eth_dev->security_ctx = NULL;
+
+	return 0;
+}
+
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
+{
+	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
+
+	/** Capability check for IPsec Crypto */
+	if (resources && (resources->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
+		return true;
+
+	return false;
+}
+
+#define IAVF_IPSEC_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
+enum iavf_ipsec_flow_pt_type {
+	IAVF_PATTERN_ESP = 1,
+	IAVF_PATTERN_AH,
+	IAVF_PATTERN_UDP_ESP,
+};
+enum iavf_ipsec_flow_pt_ip_ver {
+	IAVF_PATTERN_IPV4 = 1,
+	IAVF_PATTERN_IPV6,
+};
+
+#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
+#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
+#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
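+/*
+ * Example (sketch): IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)
+ * encodes to (void *)0x23; IAVF_PATTERN_TYPE() and IAVF_PATTERN_IP_V()
+ * recover the two enums from the low and high nibble respectively.
+ */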
+
+static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
+	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
+};
+
+struct iavf_ipsec_flow_item {
+	uint64_t id;
+	uint8_t is_ipv4;
+	uint32_t spi;
+	struct rte_ether_hdr eth_hdr;
+	union {
+		struct rte_ipv4_hdr ipv4_hdr;
+		struct rte_ipv6_hdr ipv6_hdr;
+	};
+	struct rte_udp_hdr udp_hdr;
+};
+
+static void
+parse_eth_item(const struct rte_flow_item_eth *item,
+		struct rte_ether_hdr *eth)
+{
+	memcpy(eth->src_addr.addr_bytes,
+			item->src.addr_bytes, sizeof(eth->src_addr));
+	memcpy(eth->dst_addr.addr_bytes,
+			item->dst.addr_bytes, sizeof(eth->dst_addr));
+}
+
+static void
+parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
+		struct rte_ipv4_hdr *ipv4)
+{
+	ipv4->src_addr = item->hdr.src_addr;
+	ipv4->dst_addr = item->hdr.dst_addr;
+}
+
+static void
+parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
+		struct rte_ipv6_hdr *ipv6)
+{
+	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
+	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+}
+
+static void
+parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
+{
+	udp->dst_port = item->hdr.dst_port;
+	udp->src_port = item->hdr.src_port;
+}
+
+static int
+has_security_action(const struct rte_flow_action actions[],
+	const void **session)
+{
+	/* only {SECURITY; END} supported */
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
+		*session = actions[0].conf;
+		return true;
+	}
+	return false;
+}
+
+static struct iavf_ipsec_flow_item *
+iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t type)
+{
+	const void *session;
+	struct iavf_ipsec_flow_item
+		*ipsec_flow = rte_malloc("security-flow-rule",
+		sizeof(struct iavf_ipsec_flow_item), 0);
+	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
+	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
+
+	if (ipsec_flow == NULL)
+		return NULL;
+
+	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
+
+	if (pattern[0].spec)
+		parse_eth_item((const struct rte_flow_item_eth *)
+				pattern[0].spec, &ipsec_flow->eth_hdr);
+
+	switch (p_type) {
+	case IAVF_PATTERN_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[2].spec)->hdr.spi;
+		break;
+	case IAVF_PATTERN_AH:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_ah *)
+					pattern[2].spec)->spi;
+		break;
+	case IAVF_PATTERN_UDP_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		parse_udp_item((const struct rte_flow_item_udp *)
+				pattern[2].spec,
+			&ipsec_flow->udp_hdr);
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[3].spec)->hdr.spi;
+		break;
+	default:
+		goto flow_cleanup;
+	}
+
+	if (!has_security_action(actions, &session))
+		goto flow_cleanup;
+
+	if (!iavf_ipsec_crypto_action_valid(ethdev, session,
+			ipsec_flow->spi))
+		goto flow_cleanup;
+
+	return ipsec_flow;
+
+flow_cleanup:
+	rte_free(ipsec_flow);
+	return NULL;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser;
+
+static int
+iavf_ipsec_flow_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
+		parser = &iavf_ipsec_flow_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
+}
+
+static int
+iavf_ipsec_flow_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = meta;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	if (ipsec_flow->is_ipv4) {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			1,
+			ipsec_flow->ipv4_hdr.dst_addr,
+			NULL,
+			0);
+	} else {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			0,
+			0,
+			ipsec_flow->ipv6_hdr.dst_addr,
+			0);
+	}
+
+	if (ipsec_flow->id < 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Failed to add SA.");
+		return -rte_errno;
+	}
+
+	flow->rule = ipsec_flow;
+
+	return 0;
+}
+
+static int
+iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	iavf_ipsec_crypto_security_policy_delete(ad,
+			ipsec_flow->is_ipv4, ipsec_flow->id);
+	rte_free(ipsec_flow);
+	return 0;
+}
+
+static struct iavf_flow_engine iavf_ipsec_flow_engine = {
+	.init = iavf_ipsec_flow_init,
+	.uninit = iavf_ipsec_flow_uninit,
+	.create = iavf_ipsec_flow_create,
+	.destroy = iavf_ipsec_flow_destroy,
+	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
+};
+
+static int
+iavf_ipsec_flow_parse(struct iavf_adapter *ad,
+		       struct iavf_pattern_match_item *array,
+		       uint32_t array_len,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta,
+		       struct rte_flow_error *error)
+{
+	struct iavf_pattern_match_item *item = NULL;
+	int ret = -1;
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (item && item->meta) {
+		uint32_t type = (uint64_t)(item->meta);
+		struct iavf_ipsec_flow_item *fi =
+				iavf_ipsec_flow_item_parse(ad->vf.eth_dev,
+						pattern, actions, type);
+		if (fi && meta) {
+			*meta = fi;
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser = {
+	.engine = &iavf_ipsec_flow_engine,
+	.array = iavf_ipsec_flow_pattern,
+	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
+	.parse_pattern_action = iavf_ipsec_flow_parse,
+	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
+};
+
+RTE_INIT(iavf_ipsec_flow_engine_register)
+{
+	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
+}
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
new file mode 100644
index 0000000000..4e4c8798ec
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_H_
+#define _IAVF_IPSEC_CRYPTO_H_
+
+#include <rte_security.h>
+
+#include "iavf.h"
+
+struct iavf_tx_ipsec_desc {
+	union {
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le16 l4payload_length;
+			__le32 esn;
+			__le16 trailer_length;
+			u8 type:4;
+			u8 rsv:1;
+			u8 udp:1;
+			u8 ivlen:2;
+			u8 next_header;
+			__le16 ipv6_ext_hdr_length;
+			__le32 said;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
+			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
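+/*
+ * Illustrative use of the shift/mask pairs above (sketch only; the real
+ * descriptor build lives in the Tx path), with "md" standing in for a
+ * pointer to the packet metadata:
+ *	qw0 |= ((uint64_t)md->l4_payload_len <<
+ *		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) &
+ *		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK;
+ */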
+
+/* Initialization Vector Length type */
+enum iavf_ipsec_iv_len {
+	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
+	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
+	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
+	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
+};
+
+
+/* IPsec Crypto Packet Metadata offload flags */
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
+
+/**
+ * Packet metadata data structure used to hold parameters required by the iAVF
+ * transmit data path. Parameters are set for the session by calling the
+ * rte_security_set_pkt_metadata() API.
+ */
+struct iavf_ipsec_crypto_pkt_metadata {
+	uint32_t sa_idx;                /* SA hardware index (20b/4B) */
+
+	uint8_t ol_flags;		/* flags (1B) */
+	uint8_t len_iv;			/* IV length (2b/1B) */
+	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
+	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
+
+	uint16_t l4_payload_len;	/* L4 payload length */
+	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extension headers len (5b/1B) */
+	uint8_t next_proto;		/* Next Protocol (8b/1B) */
+
+	uint32_t esn;		        /* Extended Sequence Number (32b/4B) */
+} __rte_packed;
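+/*
+ * Usage sketch (application side, illustrative only): once the ESP packet
+ * is built, the application calls
+ *	rte_security_set_pkt_metadata(sec_ctx, session, mbuf, &sqn);
+ * which lands in iavf_ipsec_crypto_pkt_metadata_set() and fills this
+ * structure in the mbuf dynamic field registered at security init time.
+ */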
+
+/**
+ * Inline IPsec Crypto offload is supported
+ */
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
+
+/**
+ * Create security context
+ */
+int iavf_security_ctx_create(struct iavf_adapter *adapter);
+
+/**
+ * Initialize security context
+ */
+int iavf_security_init(struct iavf_adapter *adapter);
+
+/**
+ * Set security capabilities
+ */
+int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchl_capabilities);
+
+
+int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+/**
+ * Destroy security context
+ */
+int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
+
+/**
+ * Verify that the inline IPsec Crypto action is valid for this device
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi);
+
+/**
+ * Add inbound security policy rule to hardware
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop);
+
+/**
+ * Delete inbound security policy rule from hardware
+ */
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id);
+
+#endif /* _IAVF_IPSEC_CRYPTO_H_ */
diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
new file mode 100644
index 0000000000..70ce8dd638
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC MAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 },
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* ChaCha20-Poly1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 16,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	}
+};
+
+
+#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index dbf71747c0..73d8898b66 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -27,6 +27,7 @@
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
+#include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
@@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 uint8_t
 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
@@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
 		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
 		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
 		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
+				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
 	};
 
 	return flex_type < RTE_DIM(rxdid_map) ?
@@ -508,6 +512,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields =
 			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
+	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+		rxq->xtr_ol_flag =
+			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+		rxq->rxd_to_pkt_fields =
+			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
 	case IAVF_RXDID_COMMS_OVS_1:
 		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
 		break;
@@ -692,6 +702,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		       const struct rte_eth_txconf *tx_conf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
@@ -736,9 +748,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		struct virtchnl_vlan_supported_caps *insertion_support =
-			&vf->vlan_v2_caps.offloads.insertion_support;
+			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
 		uint32_t insertion_cap;
 
 		if (insertion_support->outer)
@@ -762,6 +774,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
+	if (iavf_ipsec_crypto_supported(adapter))
+		txq->ipsec_crypto_pkt_md_offset =
+			iavf_security_get_pkt_md_offset(adapter);
+
 	/* Allocate software ring */
 	txq->sw_ring =
 		rte_zmalloc_socket("iavf tx sw ring",
@@ -1081,6 +1097,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
 #endif
 }
 
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
+
+	mb->dynfield1[0] = desc->ipsec_said &
+			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
+}
+
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp,
+			  struct iavf_ipsec_crypto_stats *stats)
+{
+	uint16_t status1 = rte_le_to_cpu_64(rxdp->wb.status_error1);
+
+	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
+		uint16_t ipsec_status;
+
+		mb->ol_flags |= PKT_RX_SEC_OFFLOAD;
+
+		ipsec_status = status1 &
+			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
+
+
+		if (unlikely(ipsec_status !=
+			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
+			mb->ol_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+
+			switch (ipsec_status) {
+			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
+				stats->ierrors.sad_miss++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
+				stats->ierrors.not_processed++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
+				stats->ierrors.icv_check++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
+				stats->ierrors.ipsec_length++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
+				stats->ierrors.misc++;
+				break;
+			}
+
+			stats->ierrors.count++;
+			return;
+		}
+
+		stats->icount++;
+		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
+
+		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
+			ipsec_status !=
+				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
+			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
+	}
+}
+
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
@@ -1399,6 +1479,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1541,6 +1623,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1779,6 +1863,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+				&rxq->stats.ipsec_crypto);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2091,6 +2177,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
 	*field |= cmd;
 }
 
+static inline void
+iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+{
+	uint64_t ipsec_field =
+		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+
+	*field |= ipsec_field;
+}
+
+
 static inline void
 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 		const struct rte_mbuf *m)
@@ -2123,15 +2221,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 
 static inline uint16_t
 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
-	struct rte_mbuf *m)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
 	uint64_t segmentation_field = 0;
 	uint64_t total_length = 0;
 
-	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD) {
+		total_length = ipsec_md->l4_payload_len;
+	} else {
+		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
-	if (m->ol_flags & PKT_TX_TUNNEL_MASK)
-		total_length -= m->outer_l3_len;
+		if (m->ol_flags & PKT_TX_TUNNEL_MASK)
+			total_length -= m->outer_l3_len;
+	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
 	if (!m->l4_len || !m->tso_segsz)
@@ -2160,7 +2262,8 @@ struct iavf_tx_context_desc_qws {
 
 static inline void
 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
-	struct rte_mbuf *m, uint16_t *tlen)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
+	uint16_t *tlen)
 {
 	volatile struct iavf_tx_context_desc_qws *desc_qws =
 			(volatile struct iavf_tx_context_desc_qws *)desc;
@@ -2172,8 +2275,13 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 
 	/* fill segmentation field */
 	if (m->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)) {
+		/* fill IPsec field */
+		if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+			iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
+				ipsec_md);
+
 		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
-				m);
+				m, ipsec_md);
 	}
 
 	/* fill tunnelling field */
@@ -2187,6 +2295,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 }
 
 
+static inline void
+iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
+	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
+{
+	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
+		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
+		((uint64_t)md->esp_trailer_len <<
+				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
+
+	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
+		((uint64_t)md->next_proto <<
+				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
+		((uint64_t)(md->len_iv & 0x3) <<
+				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
+		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+				1ULL : 0ULL) <<
+				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
+		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+
+	/**
+	 * TODO: Pre-calculate this in the Session initialization
+	 *
+	 * Calculate IPsec length required in data descriptor func when TSO
+	 * offload is enabled
+	 */
+	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
+			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+			sizeof(struct rte_udp_hdr) : 0);
+}
+
 static inline void
 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		struct rte_mbuf *m)
@@ -2298,6 +2438,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
 }
 
 
+static struct iavf_ipsec_crypto_pkt_metadata *
+iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
+		struct rte_mbuf *m)
+{
+	if (m->ol_flags & PKT_TX_SEC_OFFLOAD)
+		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
+				struct iavf_ipsec_crypto_pkt_metadata *);
+
+	return NULL;
+}
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -2326,7 +2477,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	for (idx = 0; idx < nb_pkts; idx++) {
 		volatile struct iavf_tx_desc *ddesc;
-		uint16_t nb_desc_ctx;
+		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
+
+		uint16_t nb_desc_ctx, nb_desc_ipsec;
 		uint16_t nb_desc_data, nb_desc_required;
 		uint16_t tlen = 0, ipseclen = 0;
 		uint64_t ddesc_template = 0;
@@ -2336,16 +2489,23 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+		/**
+		 * Get metadata for ipsec crypto from mbuf dynamic fields if
+		 * security offload is specified.
+		 */
+		ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);
+
 		nb_desc_data = mb->nb_segs;
 		nb_desc_ctx = !!(mb->ol_flags &
 			(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG | PKT_TX_TUNNEL_MASK));
+		nb_desc_ipsec = !!(mb->ol_flags & PKT_TX_SEC_OFFLOAD);
 
 		/**
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx;
+		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2396,7 +2556,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txe->mbuf = NULL;
 			}
 
-			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
 			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
 			txe->last_id = desc_idx_last;
@@ -2404,7 +2564,27 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe = txn;
 			}
 
+		if (nb_desc_ipsec) {
+			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
+				(volatile struct iavf_tx_ipsec_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
+			if (txe->mbuf) {
+				rte_pktmbuf_free_seg(txe->mbuf);
+				txe->mbuf = NULL;
+			}
+
+			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
+		}
 
 		mb_seg = mb;
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index c4ce9aa99e..500ffb2d06 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -24,22 +24,23 @@
 #define IAVF_VPMD_TX_MAX_FREE_BUF 64
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
-		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
-		RTE_ETH_TX_OFFLOAD_TCP_TSO)
+		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
+		DEV_TX_OFFLOAD_TCP_TSO |		 \
+		DEV_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
-		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		 \
-		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |		 \
-		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		 \
-		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		 \
-		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		 \
-		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
+		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
+		DEV_TX_OFFLOAD_QINQ_INSERT |		 \
+		DEV_TX_OFFLOAD_IPV4_CKSUM |		 \
+		DEV_TX_OFFLOAD_SCTP_CKSUM |		 \
+		DEV_TX_OFFLOAD_UDP_CKSUM |		 \
+		DEV_TX_OFFLOAD_TCP_CKSUM)
 
 #define IAVF_RX_VECTOR_OFFLOAD (				 \
-		RTE_ETH_RX_OFFLOAD_CHECKSUM |		 \
-		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		 \
-		RTE_ETH_RX_OFFLOAD_VLAN |		 \
-		RTE_ETH_RX_OFFLOAD_RSS_HASH)
+		DEV_RX_OFFLOAD_CHECKSUM |		 \
+		DEV_RX_OFFLOAD_SCTP_CKSUM |		 \
+		DEV_RX_OFFLOAD_VLAN |		 \
+		DEV_RX_OFFLOAD_RSS_HASH)
 
 #define IAVF_VECTOR_PATH 0
 #define IAVF_VECTOR_OFFLOAD_PATH 1
@@ -47,7 +48,7 @@
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          88
+#define IAVF_MIN_TSO_MSS          256
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
@@ -65,7 +66,8 @@
 		PKT_TX_VLAN_PKT |		 \
 		PKT_TX_IP_CKSUM |		 \
 		PKT_TX_L4_MASK |		 \
-		PKT_TX_TCP_SEG)
+		PKT_TX_TCP_SEG |		 \
+		DEV_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
@@ -163,6 +165,24 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_rx_queue_stats {
+	uint64_t reserved;
+	struct iavf_ipsec_crypto_stats ipsec_crypto;
+};
+
 /* Structure associated with each Rx queue. */
 struct iavf_rx_queue {
 	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
@@ -211,6 +231,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 				/* handle flexible descriptor by RXDID */
+	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
 };
 
@@ -245,6 +266,7 @@ struct iavf_tx_queue {
 	uint64_t offloads;
 	uint16_t next_dd;              /* next to set RS, for VPMD */
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
+	uint16_t ipsec_crypto_pkt_md_offset;
 
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
@@ -347,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
 	} flex_ts;
 };
 
+/* Rx Flex Descriptor
+ * RxDID Profile ID 24 Inline IPsec
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: Inline IPsec SAID lower 16-bits
+ * Flex-field 5: Inline IPsec SAID upper 16-bits
+ */
+struct iavf_32b_rx_flex_desc_comms_ipsec {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	__le32 ipsec_said;
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -366,6 +422,7 @@ enum iavf_rxdid {
 	IAVF_RXDID_COMMS_AUX_TCP	= 21,
 	IAVF_RXDID_COMMS_OVS_1		= 22,
 	IAVF_RXDID_COMMS_OVS_2		= 23,
+	IAVF_RXDID_COMMS_IPSEC_CRYPTO	= 24,
 	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
 	IAVF_RXDID_LAST			= 63,
 };
@@ -393,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
 
 enum iavf_rx_flex_desc_status_error_1_bits {
 	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
 	/* [10:6] reserved */
 	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
 	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
@@ -405,6 +466,23 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
+
+enum iavf_rx_flex_desc_ipsec_crypto_status {
+	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
+	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
+	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
+	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
+	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
+	/* Reserved */
+	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
+};
+
+
 
 #define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
 #define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
@@ -672,6 +750,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	case IAVF_TX_DESC_DTYPE_CONTEXT:
 		name = "Tx_context_desc";
 		break;
+	case IAVF_TX_DESC_DTYPE_IPSEC:
+		name = "Tx_IPsec_desc";
+		break;
 	default:
 		name = "unknown_desc";
 		break;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index da4654957a..4827313ee7 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1774,3 +1774,32 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 
 	return 0;
 }
+
+
+
+int
+iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
+	args.in_args = msg;
+	args.in_args_size = msg_len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 1);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+				"OP_INLINE_IPSEC_CRYPTO");
+		return err;
+	}
+
+	memcpy(resp_msg, args.out_buffer, resp_msg_len);
+
+	return 0;
+}
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 36a82e3faa..5eb230f687 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -5,7 +5,7 @@
 cflags += ['-Wno-strict-aliasing']
 
 includes += include_directories('../../common/iavf')
-deps += ['common_iavf']
+deps += ['common_iavf', 'security', 'cryptodev']
 
 sources = files(
         'iavf_ethdev.c',
@@ -15,6 +15,7 @@ sources = files(
         'iavf_fdir.c',
         'iavf_hash.c',
         'iavf_tm.c',
+        'iavf_ipsec_crypto.c',
 )
 
 if arch_subdir == 'x86'
diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
index 3a045040f1..7426eb9be3 100644
--- a/drivers/net/iavf/rte_pmd_iavf.h
+++ b/drivers/net/iavf/rte_pmd_iavf.h
@@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 /**
  * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map
index f3efe756cf..97f0f87311 100644
--- a/drivers/net/iavf/version.map
+++ b/drivers/net/iavf/version.map
@@ -13,4 +13,7 @@ EXPERIMENTAL {
 	rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+	# added in 21.11
+	rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v11 5/7] net/iavf: add xstats support for inline IPsec crypto
  2021-10-26 10:38 ` [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (3 preceding siblings ...)
  2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-26 10:38   ` Radu Nicolau
  2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
                     ` (2 subsequent siblings)
  7 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 10:38 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add per-queue counters for maintaining statistics for inline IPsec
crypto offload. These can be retrieved through
rte_security_session_stats_get(), with more detailed error counters
exposed through the rte_ethdev xstats API.
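For illustration only (not part of this patch), an application could read
the new counters through the generic xstats API roughly as below; the
"inline_ipsec_crypto_" prefix matches the names added to
rte_iavf_stats_strings, and error handling is omitted:

#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
dump_inline_ipsec_xstats(uint16_t port_id)
{
	int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
	struct rte_eth_xstat_name names[nb];
	struct rte_eth_xstat values[nb];
	int i;

	rte_eth_xstats_get_names(port_id, names, nb);
	rte_eth_xstats_get(port_id, values, nb);

	for (i = 0; i < nb; i++) {
		/* the counters added by this patch share a common prefix */
		if (strncmp(names[i].name, "inline_ipsec_crypto_", 20) == 0)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, values[i].value);
	}
}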

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h        | 21 ++++++++-
 drivers/net/iavf/iavf_ethdev.c | 84 ++++++++++++++++++++++++++++------
 drivers/net/iavf/iavf_rxtx.h   | 12 -----
 3 files changed, 89 insertions(+), 28 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 6df31a649e..f314373ab0 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -96,6 +96,25 @@ struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
 
+
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_eth_xstats {
+	struct virtchnl_eth_stats eth_stats;
+	struct iavf_ipsec_crypto_stats ips_stats;
+};
+
 /* Structure that defines a VSI, associated with a adapter. */
 struct iavf_vsi {
 	struct iavf_adapter *adapter; /* Backreference to associated adapter */
@@ -105,7 +124,7 @@ struct iavf_vsi {
 	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
 	uint16_t base_vector;
 	uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
-	struct virtchnl_eth_stats eth_stats_offset;
+	struct iavf_eth_xstats eth_stats_offset;
 };
 
 struct rte_flow;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 9ab42b6452..e1e6f49dec 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -90,6 +90,7 @@ static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);
 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n);
 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -145,21 +146,37 @@ struct rte_iavf_xstats_name_off {
 	unsigned int offset;
 };
 
+#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
-	{"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
-	{"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
-	{"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
-	{"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
-	{"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
+	{"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
+	{"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
+	{"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
+	{"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
+	{"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
 	{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
 		rx_unknown_protocol)},
-	{"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
-	{"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
-	{"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
-	{"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
-	{"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
-	{"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
+	{"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
+	{"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
+	{"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
+	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
+	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
+	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+
+	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
+	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
+	{"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
+	{"inline_ipsec_crypto_ierrors_sad_lookup",
+			_OFF_OF(ips_stats.ierrors.sad_miss)},
+	{"inline_ipsec_crypto_ierrors_not_processed",
+			_OFF_OF(ips_stats.ierrors.not_processed)},
+	{"inline_ipsec_crypto_ierrors_icv_fail",
+			_OFF_OF(ips_stats.ierrors.icv_check)},
+	{"inline_ipsec_crypto_ierrors_length",
+			_OFF_OF(ips_stats.ierrors.ipsec_length)},
+	{"inline_ipsec_crypto_ierrors_misc",
+			_OFF_OF(ips_stats.ierrors.misc)},
 };
+#undef _OFF_OF
 
 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
 		sizeof(rte_iavf_stats_strings[0]))
@@ -177,7 +194,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.stats_reset                = iavf_dev_stats_reset,
 	.xstats_get                 = iavf_dev_xstats_get,
 	.xstats_get_names           = iavf_dev_xstats_get_names,
-	.xstats_reset               = iavf_dev_stats_reset,
+	.xstats_reset               = iavf_dev_xstats_reset,
 	.promiscuous_enable         = iavf_dev_promiscuous_enable,
 	.promiscuous_disable        = iavf_dev_promiscuous_disable,
 	.allmulticast_enable        = iavf_dev_allmulticast_enable,
@@ -1529,7 +1546,7 @@ iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
 static void
 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
 {
-	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
 
 	iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
 	iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
@@ -1591,7 +1608,18 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev)
 		return ret;
 
 	/* set stats offset base on current values */
-	vsi->eth_stats_offset = *pstats;
+	vsi->eth_stats_offset.eth_stats = *pstats;
+
+	return 0;
+}
+
+static int
+iavf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_dev_stats_reset(dev);
+	memset(&vf->vsi.eth_stats_offset, 0, sizeof(struct iavf_eth_xstats));
 
 	return 0;
 }
@@ -1611,6 +1639,27 @@ static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return IAVF_NB_XSTATS;
 }
 
+static void
+iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
+		struct iavf_ipsec_crypto_stats *ips)
+{
+	uint16_t idx;
+	for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
+		struct iavf_rx_queue *rxq;
+		struct iavf_ipsec_crypto_stats *stats;
+		rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
+		stats = &rxq->stats.ipsec_crypto;
+		ips->icount += stats->icount;
+		ips->ibytes += stats->ibytes;
+		ips->ierrors.count += stats->ierrors.count;
+		ips->ierrors.sad_miss += stats->ierrors.sad_miss;
+		ips->ierrors.not_processed += stats->ierrors.not_processed;
+		ips->ierrors.icv_check += stats->ierrors.icv_check;
+		ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
+		ips->ierrors.misc += stats->ierrors.misc;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1621,6 +1670,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_vsi *vsi = &vf->vsi;
 	struct virtchnl_eth_stats *pstats = NULL;
+	struct iavf_eth_xstats iavf_xtats = {0};
 
 	if (n < IAVF_NB_XSTATS)
 		return IAVF_NB_XSTATS;
@@ -1633,11 +1683,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 		return 0;
 
 	iavf_update_stats(vsi, pstats);
+	iavf_xtats.eth_stats = *pstats;
+
+	if (iavf_ipsec_crypto_supported(adapter))
+		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
-		xstats[i].value = *(uint64_t *)(((char *)pstats) +
+		xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
 			rte_iavf_stats_strings[i].offset);
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 500ffb2d06..5e39d2bc96 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -165,18 +165,6 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
-struct iavf_ipsec_crypto_stats {
-	uint64_t icount;
-	uint64_t ibytes;
-	struct {
-		uint64_t count;
-		uint64_t sad_miss;
-		uint64_t not_processed;
-		uint64_t icv_check;
-		uint64_t ipsec_length;
-		uint64_t misc;
-	} ierrors;
-};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v11 6/7] net/iavf: add watchdog for VFLR
  2021-10-26 10:38 ` [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (4 preceding siblings ...)
  2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-10-26 10:38   ` Radu Nicolau
  2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
  2021-10-26 12:30   ` [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec " Zhang, Qi Z
  7 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 10:38 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add a watchdog to the iAVF PMD that monitors the VFLR register. If the
device is not already in reset and a VF reset in progress is detected,
the user is notified through a callback and the device is put into the
reset state. If the device is already in reset, the watchdog polls for
completion of the reset.

The watchdog is disabled by default; to enable it, set
IAVF_DEV_WATCHDOG_PERIOD to a non-zero value (in microseconds).
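As an illustrative sketch (not part of the patch), an application receives
the watchdog's VFLR notification by registering for the existing
RTE_ETH_EVENT_INTR_RESET ethdev event; the recovery steps in the comment
describe a typical application policy, not something mandated by the PMD:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static int
vf_reset_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		  void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	printf("port %u: VF reset detected by the PMD watchdog\n", port_id);
	/* Typical handling (application policy): flag the port for
	 * recovery and, outside of this callback, stop, reconfigure and
	 * restart it once the PF has completed the reset.
	 */
	return 0;
}

static void
register_vf_reset_cb(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
				      vf_reset_event_cb, NULL);
}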

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h        |  5 ++
 drivers/net/iavf/iavf_ethdev.c | 94 ++++++++++++++++++++++++++++++++++
 2 files changed, 99 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index f314373ab0..40c8045de1 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -31,6 +31,8 @@
 
 #define IAVF_NUM_MACADDR_MAX      64
 
+#define IAVF_DEV_WATCHDOG_PERIOD     0
+
 #define IAVF_DEFAULT_RX_PTHRESH      8
 #define IAVF_DEFAULT_RX_HTHRESH      8
 #define IAVF_DEFAULT_RX_WTHRESH      0
@@ -216,6 +218,9 @@ struct iavf_info {
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
+	/** iAVF watchdog enable */
+	bool watchdog_enabled;
+
 	/* Event from pf */
 	bool dev_closed;
 	bool link_up;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index e1e6f49dec..e2897441aa 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -25,6 +25,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_alarm.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
@@ -240,6 +241,91 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+__rte_unused
+static int
+iavf_vfr_inprogress(struct iavf_hw *hw)
+{
+	int inprogress = 0;
+
+	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
+		VIRTCHNL_VFR_INPROGRESS)
+		inprogress = 1;
+
+	if (inprogress)
+		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
+
+	return inprogress;
+}
+
+__rte_unused
+static void
+iavf_dev_watchdog(void *cb_arg)
+{
+	struct iavf_adapter *adapter = cb_arg;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	int vfr_inprogress = 0, rc = 0;
+
+	/* check if watchdog has been disabled since last call */
+	if (!adapter->vf.watchdog_enabled)
+		return;
+
+	/* If in reset then poll vfr_inprogress register for completion */
+	if (adapter->vf.vf_reset) {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (!vfr_inprogress) {
+			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+				adapter->vf.eth_dev->data->name);
+			adapter->vf.vf_reset = false;
+		}
+	/* If not in reset then poll vfr_inprogress register for VFLR event */
+	} else {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (vfr_inprogress) {
+			PMD_DRV_LOG(INFO,
+				"VF \"%s\" reset event detected by watchdog",
+				adapter->vf.eth_dev->data->name);
+
+			/* enter reset state with VFLR event */
+			adapter->vf.vf_reset = true;
+
+			rte_eth_dev_callback_process(adapter->vf.eth_dev,
+				RTE_ETH_EVENT_INTR_RESET, NULL);
+		}
+	}
+
+	/* re-alarm watchdog */
+	rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, cb_arg);
+
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed \"%s\" to reset device watchdog alarm",
+			adapter->vf.eth_dev->data->name);
+}
+
+static void
+iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Enabling device watchdog");
+	adapter->vf.watchdog_enabled = true;
+	if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, (void *)adapter))
+		PMD_DRV_LOG(ERR, "Failed to enable device watchdog");
+#endif
+}
+
+static void
+iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Disabling device watchdog");
+	adapter->vf.watchdog_enabled = false;
+#endif
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
@@ -2466,6 +2552,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	iavf_default_rss_disable(adapter);
 
+
+	/* Start device watchdog */
+	iavf_dev_watchdog_enable(adapter);
+
+
 	return 0;
 
 flow_init_err:
@@ -2549,6 +2640,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
 		vf->vf_reset = false;
 
+	/* disable watchdog */
+	iavf_dev_watchdog_disable(adapter);
+
 	return ret;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v11 7/7] net/iavf: update doc with inline crypto support
  2021-10-26 10:38 ` [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (5 preceding siblings ...)
  2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
@ 2021-10-26 10:38   ` Radu Nicolau
  2021-10-26 12:30   ` [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec " Zhang, Qi Z
  7 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 10:38 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Haiyue Wang
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Update the PMD doc, feature matrix and release notes with the
new inline crypto feature.

Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 doc/guides/nics/features/iavf.ini      |  2 ++
 doc/guides/nics/intel_vf.rst           | 10 ++++++++++
 doc/guides/rel_notes/release_21_11.rst |  1 +
 3 files changed, 13 insertions(+)

diff --git a/doc/guides/nics/features/iavf.ini b/doc/guides/nics/features/iavf.ini
index dd3519e1e2..01f514239e 100644
--- a/doc/guides/nics/features/iavf.ini
+++ b/doc/guides/nics/features/iavf.ini
@@ -27,6 +27,7 @@ L4 checksum offload  = P
 Packet type parsing  = Y
 Rx descriptor status = Y
 Tx descriptor status = Y
+Inline crypto        = Y
 Basic stats          = Y
 Multiprocess aware   = Y
 FreeBSD              = Y
@@ -65,3 +66,4 @@ mark                 = Y
 passthru             = Y
 queue                = Y
 rss                  = Y
+security             = Y
diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index a1e236ad75..fd235e1463 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -633,3 +633,13 @@ Windows Support
 
 *   To load NetUIO driver, follow the steps mentioned in `dpdk-kmods repository
     <https://git.dpdk.org/dpdk-kmods/tree/windows/netuio/README.rst>`_.
+
+
+Inline IPsec Support
+--------------------
+
+*   IAVF PMD supports inline crypto processing depending on the underlying
+    hardware crypto capabilities. IPsec Security Gateway Sample Application
+    supports inline IPsec processing for IAVF PMD. For more details see the
+    IPsec Security Gateway Sample Application and Security library
+    documentation.
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index b327c2bfca..6c0cb55f17 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -152,6 +152,7 @@ New Features
   * Added Intel iavf support on Windows.
   * Added IPv4 and L4 (TCP/UDP/SCTP) checksum hash support in RSS flow.
   * Added PPPoL2TPv2oUDP RSS hash based on inner IP address and TCP/UDP port.
+  * Added Intel iavf inline crypto support.
 
 * **Updated Intel ice driver.**
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec inline crypto support
  2021-10-26 10:38 ` [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (6 preceding siblings ...)
  2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
@ 2021-10-26 12:30   ` Zhang, Qi Z
  7 siblings, 0 replies; 128+ messages in thread
From: Zhang, Qi Z @ 2021-10-26 12:30 UTC (permalink / raw)
  To: Nicolau, Radu
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Wu, Jingjing, Xing, Beilei,
	Richardson, Bruce, Ananyev, Konstantin



> -----Original Message-----
> From: Nicolau, Radu <radu.nicolau@intel.com>
> Sent: Tuesday, October 26, 2021 6:38 PM
> Cc: dev@dpdk.org; Doherty, Declan <declan.doherty@intel.com>; Sinha,
> Abhijit <abhijit.sinha@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Zhang, Qi Z <qi.z.zhang@intel.com>; Xing, Beilei <beilei.xing@intel.com>;
> Richardson, Bruce <bruce.richardson@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Nicolau, Radu <radu.nicolau@intel.com>
> Subject: [PATCH v11 0/7] iavf: add iAVF IPsec inline crypto support
> 
> Add support for inline crypto for IPsec, for ESP transport and tunnel over IPv4
> and IPv6, as well as supporting the offload for ESP over UDP, and
> inconjunction with TSO for UDP and TCP flows.
> 
> Radu Nicolau (7):
>   common/iavf: add iAVF IPsec inline crypto support
>   net/iavf: rework tx path
>   net/iavf: add support for asynchronous virt channel messages
>   net/iavf: add iAVF IPsec inline crypto support
>   net/iavf: add xstats support for inline IPsec crypto
>   net/iavf: add watchdog for VFLR
>   net/iavf: update doc with inline crypto support
> 
>  doc/guides/nics/features/iavf.ini             |    2 +
>  doc/guides/nics/intel_vf.rst                  |   10 +
>  doc/guides/rel_notes/release_21_11.rst        |    1 +
>  drivers/common/iavf/iavf_type.h               |    1 +
>  drivers/common/iavf/virtchnl.h                |   17 +-
>  drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
>  drivers/net/iavf/iavf.h                       |   52 +-
>  drivers/net/iavf/iavf_ethdev.c                |  219 +-
>  drivers/net/iavf/iavf_generic_flow.c          |   15 +
>  drivers/net/iavf/iavf_generic_flow.h          |    2 +
>  drivers/net/iavf/iavf_ipsec_crypto.c          | 1894 +++++++++++++++++
>  drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
>  .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
>  drivers/net/iavf/iavf_rxtx.c                  |  710 ++++--
>  drivers/net/iavf/iavf_rxtx.h                  |  220 +-
>  drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
>  drivers/net/iavf/iavf_vchnl.c                 |  167 +-
>  drivers/net/iavf/meson.build                  |    3 +-
>  drivers/net/iavf/rte_pmd_iavf.h               |    1 +
>  drivers/net/iavf/version.map                  |    3 +
>  20 files changed, 4101 insertions(+), 322 deletions(-)  create mode 100644
> drivers/common/iavf/virtchnl_inline_ipsec.h
>  create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
>  create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
>  create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
> 
> --
> v2: small updates and fixes in the flow related section
> v3: split the huge patch and address feedback
> v4: small changes due to dependencies changes
> v5: updated the watchdow patch
> v6: rebased and updated the common section
> v7: fixed TSO issue and disabled watchdog by default
> v8: rebased to next-net-intel and added doc updates
> v9: fixed IV len for AEAD and GMAC
> v10: removed blank lines at EOF
> v11: rebased patchset
> 
> 2.25.1

For patch 1/7 and patch 7/7

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v12 0/7] iavf: add iAVF IPsec inline crypto support
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (13 preceding siblings ...)
  2021-10-26 10:38 ` [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec " Radu Nicolau
@ 2021-10-26 13:56 ` Radu Nicolau
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 1/7] common/iavf: " Radu Nicolau
                     ` (8 more replies)
  2021-10-28 15:52 ` [dpdk-dev] [PATCH v13 " Radu Nicolau
  2021-10-28 16:04 ` [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec " Radu Nicolau
  16 siblings, 9 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 13:56 UTC (permalink / raw)
  Cc: dev, declan.doherty, abhijit.sinha, jingjing.wu, qi.z.zhang,
	beilei.xing, bruce.richardson, konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as support for offloading
ESP over UDP, in conjunction with TSO for UDP and TCP flows.
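
For illustration only (not part of this patch set), the offloads listed
above map onto a rte_security session configuration along these lines;
the SPI, key, salt, IV length and tunnel endpoints are placeholder
values, and session creation/cleanup is omitted:

#include <rte_crypto_sym.h>
#include <rte_security.h>

static uint8_t aes_gcm_key[16]; /* placeholder key material */

static struct rte_crypto_sym_xform aead_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.key = { .data = aes_gcm_key, .length = sizeof(aes_gcm_key) },
		.iv = { .offset = 0, .length = 8 }, /* ESP IV, per PMD caps */
		.digest_length = 16,
		.aad_length = 0,
	},
};

static struct rte_security_session_conf sess_conf = {
	.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
	.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
	.ipsec = {
		.spi = 1000,                    /* placeholder */
		.salt = 0x12345678,             /* placeholder */
		.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
		.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
		.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
		.options = { .udp_encap = 1 },  /* ESP over UDP (NAT-T) */
		.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
	},
	.crypto_xform = &aead_xform,
};

/* The session is then created on the port's security context, e.g. with
 * rte_security_session_create(rte_eth_dev_get_sec_ctx(port_id),
 * &sess_conf, sess_mp, sess_priv_mp), and attached to outgoing mbufs via
 * rte_security_set_pkt_metadata(); the ipsec-secgw sample application
 * exercises this path end to end.
 */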

Radu Nicolau (7):
  common/iavf: add iAVF IPsec inline crypto support
  net/iavf: rework tx path
  net/iavf: add support for asynchronous virt channel messages
  net/iavf: add iAVF IPsec inline crypto support
  net/iavf: add xstats support for inline IPsec crypto
  net/iavf: add watchdog for VFLR
  net/iavf: update doc with inline crypto support

 doc/guides/nics/features/iavf.ini             |    2 +
 doc/guides/nics/intel_vf.rst                  |   10 +
 doc/guides/rel_notes/release_21_11.rst        |    1 +
 drivers/common/iavf/iavf_type.h               |    1 +
 drivers/common/iavf/virtchnl.h                |   17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
 drivers/net/iavf/iavf.h                       |   52 +-
 drivers/net/iavf/iavf_ethdev.c                |  219 +-
 drivers/net/iavf/iavf_generic_flow.c          |   15 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1894 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  710 ++++--
 drivers/net/iavf/iavf_rxtx.h                  |  212 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  167 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 20 files changed, 4098 insertions(+), 317 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

-- 
v2: small updates and fixes in the flow related section
v3: split the huge patch and address feedback
v4: small changes due to dependencies changes
v5: updated the watchdow patch
v6: rebased and updated the common section
v7: fixed TSO issue and disabled watchdog by default
v8: rebased to next-net-intel and added doc updates
v9: fixed IV len for AEAD and GMAC
v10: removed blank lines at EOF
v11: rebased patchset
v12: rebased patchset to RC1
 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v12 1/7] common/iavf: add iAVF IPsec inline crypto support
  2021-10-26 13:56 ` [dpdk-dev] [PATCH v12 " Radu Nicolau
@ 2021-10-26 13:56   ` Radu Nicolau
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 2/7] net/iavf: rework tx path Radu Nicolau
                     ` (7 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 13:56 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec.
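
As an illustrative sketch (not part of the patch), once the PF has replied
to VIRTCHNL_OP_GET_VF_RESOURCES the VF side can test the new capability
bit before exposing inline IPsec; vf_res is assumed to be the negotiated
virtchnl_vf_resource:

#include "virtchnl.h"

static inline int
vf_has_inline_ipsec(const struct virtchnl_vf_resource *vf_res)
{
	/* capability bit added by this patch to vf_cap_flags (BIT(8)) */
	return !!(vf_res->vf_cap_flags &
		  VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO);
}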

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/common/iavf/iavf_type.h             |   1 +
 drivers/common/iavf/virtchnl.h              |  17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h | 553 ++++++++++++++++++++
 3 files changed, 569 insertions(+), 2 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 73dfb47e70..51267ca3b3 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -723,6 +723,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 067f715945..269578f7c0 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -38,6 +38,8 @@
  * value in current and future projects
  */
 
+#include "virtchnl_inline_ipsec.h"
+
 /* Error Codes */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
@@ -133,7 +135,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, and 37 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
 	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
 	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
@@ -225,6 +228,8 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 		return "VIRTCHNL_OP_DCF_CMD_DESC";
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
@@ -385,7 +390,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
-	/* BIT(8) is reserved */
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
@@ -2291,6 +2296,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..1e9134501e
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all valid, if certain field is invalid, set 1 for all bits */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* vf record the capability of crypto from the virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * The VF passes virtchnl_ipsec_cap to the PF
+ * and the PF returns the IPsec capabilities over virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * The VF sends this SA configuration to the PF using virtchnl;
+ * the PF creates the SA from this configuration and the PF driver
+ * returns a unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-reply window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF send configuration of index of SA to PF
+ * PF will update SA according to configuration
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF send configuration of index of SA to PF
+ * PF will destroy SA according to configuration
+ * flag bitmap indicate all SA or just selected SA will
+ * be destroyed
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF send this SA configuration to PF using virtchnl;
+ * PF read SA and will return configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
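
For reference, a usage sketch of an SA create request built from the
definitions above (the helper name, the calloc() handling and the final
send step are placeholders, not part of the iavf driver API; the u16/u32
typedefs are assumed to come from the iavf common headers):

#include <stdlib.h>

static struct inline_ipsec_msg *
ipsec_sa_create_msg(u32 spi, u16 *msg_len)
{
	u16 len = virtchnl_inline_ipsec_val_msg_len(INLINE_IPSEC_OP_SA_CREATE);
	struct inline_ipsec_msg *msg = calloc(1, len);
	struct virtchnl_ipsec_sa_cfg *sa;

	if (msg == NULL)
		return NULL;

	msg->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
	msg->req_id = 1;

	sa = msg->ipsec_data.sa_cfg;
	sa->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
	sa->virtchnl_termination = VIRTCHNL_TERM_HARDWARE;
	sa->virtchnl_ip_type = VIRTCHNL_IPV4;
	sa->virtchnl_direction = VIRTCHNL_DIR_EGRESS;
	sa->spi = spi;

	*msg_len = len;
	/* msg/len is then carried to the PF in a VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
	 * message; the PF answers with virtchnl_ipsec_sa_cfg_resp (sa_handle).
	 */
	return msg;
}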
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v12 2/7] net/iavf: rework tx path
  2021-10-26 13:56 ` [dpdk-dev] [PATCH v12 " Radu Nicolau
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 1/7] common/iavf: " Radu Nicolau
@ 2021-10-26 13:56   ` Radu Nicolau
  2021-10-27  0:43     ` Zhang, Qi Z
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
                     ` (6 subsequent siblings)
  8 siblings, 1 reply; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 13:56 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Rework the TX path and TX descriptor usage in order to
allow for better use of offload flags and to facilitate enabling of
the inline crypto offload feature.
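
For illustration, the per-packet descriptor accounting that replaces
iavf_calc_context_desc() reduces to the sketch below (simplified; the
helper name is illustrative and not part of the patch):

#include <stdint.h>
#include <rte_mbuf.h>

/* one data descriptor per mbuf segment, plus one context descriptor
 * when TSO/USO or tunnelling offloads are requested for the packet
 */
static inline uint16_t
iavf_tx_descs_needed(const struct rte_mbuf *m)
{
	uint16_t nb_desc = m->nb_segs;

	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
			RTE_MBUF_F_TX_TUNNEL_MASK))
		nb_desc++;

	return nb_desc;
}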

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c         | 538 ++++++++++++++++-----------
 drivers/net/iavf/iavf_rxtx.h         | 117 +++++-
 drivers/net/iavf/iavf_rxtx_vec_sse.c |  10 +-
 3 files changed, 431 insertions(+), 234 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 52d919ca1b..128691aaf1 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1054,27 +1054,31 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 
 static inline void
 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
-			  volatile union iavf_rx_flex_desc *rxdp,
-			  uint8_t rx_flags)
+			  volatile union iavf_rx_flex_desc *rxdp)
 {
-	uint16_t vlan_tci = 0;
-
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
-	    rte_le_to_cpu_64(rxdp->wb.status_error0) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+		(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+		mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+		mb->vlan_tci =
+			rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	} else {
+		mb->vlan_tci = 0;
+	}
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
-	    rte_le_to_cpu_16(rxdp->wb.status_error1) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
-#endif
-
-	if (vlan_tci) {
-		mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
-		mb->vlan_tci = vlan_tci;
+	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
+		mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ |
+				RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
+		mb->vlan_tci_outer = mb->vlan_tci;
+		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
+	} else {
+		mb->vlan_tci_outer = 0;
 	}
+#endif
 }
 
 /* Translate the rx descriptor status and error fields to pkt flags */
@@ -1394,7 +1398,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->ol_flags = 0;
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1536,7 +1540,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = 0;
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1774,7 +1778,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
-			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
+			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2068,190 +2072,302 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 	return 0;
 }
 
-/* Check if the context descriptor is needed for TX offloading */
+
+
+static inline void
+iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
+{
+	uint64_t cmd = 0;
+
+	/* TSO enabled */
+	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
+		cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;
+
+	/* Time Sync - Currently not supported */
+
+	/* Outer L2 TAG 2 Insertion - Currently not supported */
+	/* Inner L2 TAG 2 Insertion - Currently not supported */
+
+	*field |= cmd;
+}
+
+static inline void
+iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
+		const struct rte_mbuf *m)
+{
+	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
+	uint64_t eip_len = 0;
+	uint64_t eip_noinc = 0;
+	/* Default - IP_ID is increment in each segment of LSO */
+
+	switch (m->ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6 |
+			RTE_MBUF_F_TX_OUTER_IP_CKSUM)) {
+	case RTE_MBUF_F_TX_OUTER_IPV4:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case RTE_MBUF_F_TX_OUTER_IPV6:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	}
+
+	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
+		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
+		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
+}
+
 static inline uint16_t
-iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
+iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
+	struct rte_mbuf *m)
 {
-	if (flags & RTE_MBUF_F_TX_TCP_SEG)
-		return 1;
-	if (flags & RTE_MBUF_F_TX_VLAN &&
-	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		return 1;
-	return 0;
+	uint64_t segmentation_field = 0;
+	uint64_t total_length = 0;
+
+	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+		total_length -= m->outer_l3_len;
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+	if (!m->l4_len || !m->tso_segsz)
+		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
+			 m->l4_len, m->tso_segsz);
+	if (m->tso_segsz < 88)
+		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
+			m->tso_segsz, 88);
+#endif
+	segmentation_field =
+		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
+				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
+		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
+				IAVF_TXD_CTX_QW1_MSS_MASK);
+
+	*field |= segmentation_field;
+
+	return total_length;
 }
 
+
+struct iavf_tx_context_desc_qws {
+	__le64 qw0;
+	__le64 qw1;
+};
+
 static inline void
-iavf_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union iavf_tx_offload tx_offload)
+iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
+	struct rte_mbuf *m, uint16_t *tlen)
 {
+	volatile struct iavf_tx_context_desc_qws *desc_qws =
+			(volatile struct iavf_tx_context_desc_qws *)desc;
+	/* fill descriptor type field */
+	desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
+
+	/* fill command field */
+	iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m);
+
+	/* fill segmentation field */
+	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
+		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
+				m);
+	}
+
+	/* fill tunnelling field */
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
+	else
+		desc_qws->qw0 = 0;
+
+	desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
+	desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);
+}
+
+
+static inline void
+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
+		struct rte_mbuf *m)
+{
+	uint64_t command = 0;
+	uint64_t offset = 0;
+	uint64_t l2tag1 = 0;
+
+	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
+
+	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
+
+	/* Descriptor based VLAN insertion */
+	if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
+		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
+		l2tag1 |= m->vlan_tci;
+	}
+
 	/* Set MACLEN */
-	*td_offset |= (tx_offload.l2_len >> 1) <<
-		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	}
-
-	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
+	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L3 checksum offloading inner */
+	if (m->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	}
+
+	if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (m->l4_len >> 2) <<
 			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		return;
 	}
 
 	/* Enable L4 checksum offloads */
-	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+	switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
 	case RTE_MBUF_F_TX_TCP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case RTE_MBUF_F_TX_SCTP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case RTE_MBUF_F_TX_UDP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	}
+
+	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
+		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
+		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
+		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
+		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
-/* set TSO context descriptor
- * support IP -> L4 and IP -> IP -> L4
- */
-static inline uint64_t
-iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
+static inline void
+iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t value)
 {
-	uint64_t ctx_desc = 0;
-	uint32_t cd_cmd, hdr_len, cd_tso_len;
-
-	if (!tx_offload.l4_len) {
-		PMD_TX_LOG(DEBUG, "L4 length set to 0");
-		return ctx_desc;
+	*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 	}
 
-	hdr_len = tx_offload.l2_len +
-		  tx_offload.l3_len +
-		  tx_offload.l4_len;
+static inline void
+iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
+	struct rte_mbuf *m, uint64_t desc_template,
+	uint16_t tlen, uint16_t ipseclen)
+{
+	uint32_t hdrlen = m->l2_len;
+	uint32_t bufsz = 0;
 
-	cd_cmd = IAVF_TX_CTX_DESC_TSO;
-	cd_tso_len = mbuf->pkt_len - hdr_len;
-	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
-		     ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-		     ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+	/* fill data descriptor qw1 from template */
+	desc->cmd_type_offset_bsz = desc_template;
 
-	return ctx_desc;
-}
+	/* set data buffer address */
+	desc->buffer_addr = rte_mbuf_data_iova(m);
 
-/* Construct the tx flags */
-static inline uint64_t
-iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
-	       uint32_t td_tag)
-{
-	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
-				((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
-				((uint64_t)td_offset <<
-				 IAVF_TXD_QW1_OFFSET_SHIFT) |
-				((uint64_t)size  <<
-				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
-				((uint64_t)td_tag  <<
-				 IAVF_TXD_QW1_L2TAG1_SHIFT));
+	/* calculate data buffer size less set header lengths */
+	if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
+			(m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+		hdrlen += m->outer_l3_len;
+		if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
+			hdrlen += m->l3_len + m->l4_len;
+		else
+			hdrlen += m->l3_len;
+		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
+			hdrlen += ipseclen;
+		bufsz = hdrlen + tlen;
+	} else {
+		bufsz = m->data_len;
+	}
+
+	/* set data buffer size */
+	desc->cmd_type_offset_bsz |=
+		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
+
+	desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
+	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
 }
 
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	volatile struct iavf_tx_desc *txd;
-	volatile struct iavf_tx_desc *txr;
-	struct iavf_tx_queue *txq;
-	struct iavf_tx_entry *sw_ring;
+	struct iavf_tx_queue *txq = tx_queue;
+	volatile struct iavf_tx_desc *txr = txq->tx_ring;
+	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
-	struct rte_mbuf *tx_pkt;
-	struct rte_mbuf *m_seg;
-	uint16_t tx_id;
-	uint16_t nb_tx;
-	uint32_t td_cmd;
-	uint32_t td_offset;
-	uint32_t td_tag;
-	uint64_t ol_flags;
-	uint16_t nb_used;
-	uint16_t nb_ctx;
-	uint16_t tx_last;
-	uint16_t slen;
-	uint64_t buf_dma_addr;
-	uint16_t cd_l2tag2 = 0;
-	union iavf_tx_offload tx_offload = {0};
-
-	txq = tx_queue;
-	sw_ring = txq->sw_ring;
-	txr = txq->tx_ring;
-	tx_id = txq->tx_tail;
-	txe = &sw_ring[tx_id];
+	struct rte_mbuf *mb, *mb_seg;
+	uint16_t desc_idx, desc_idx_last;
+	uint16_t idx;
+
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_free < txq->free_thresh)
-		(void)iavf_xmit_cleanup(txq);
+		iavf_xmit_cleanup(txq);
+
+	desc_idx = txq->tx_tail;
+	txe = &txe_ring[desc_idx];
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+		iavf_dump_tx_entry_ring(txq);
+		iavf_dump_tx_desc_ring(txq);
+#endif
+
 
-	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-		td_cmd = 0;
-		td_tag = 0;
-		td_offset = 0;
+	for (idx = 0; idx < nb_pkts; idx++) {
+		volatile struct iavf_tx_desc *ddesc;
+		uint16_t nb_desc_ctx;
+		uint16_t nb_desc_data, nb_desc_required;
+		uint16_t tlen = 0, ipseclen = 0;
+		uint64_t ddesc_template = 0;
+		uint64_t ddesc_cmd = 0;
+
+		mb = tx_pkts[idx];
 
-		tx_pkt = *tx_pkts++;
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
-		ol_flags = tx_pkt->ol_flags;
-		tx_offload.l2_len = tx_pkt->l2_len;
-		tx_offload.l3_len = tx_pkt->l3_len;
-		tx_offload.l4_len = tx_pkt->l4_len;
-		tx_offload.tso_segsz = tx_pkt->tso_segsz;
-		/* Calculate the number of context descriptors needed. */
-		nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
+		nb_desc_data = mb->nb_segs;
+		nb_desc_ctx = !!(mb->ol_flags &
+			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG | RTE_MBUF_F_TX_TUNNEL_MASK));
 
-		/* The number of descriptors that must be allocated for
+		/**
+		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
-		 * packet plus 1 context descriptor if needed.
+		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
-		tx_last = (uint16_t)(tx_id + nb_used - 1);
+		nb_desc_required = nb_desc_data + nb_desc_ctx;
+
+		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
-		/* Circular ring */
-		if (tx_last >= txq->nb_tx_desc)
-			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+		/* wrap descriptor ring */
+		if (desc_idx_last >= txq->nb_tx_desc)
+			desc_idx_last =
+				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
 
-		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
-			   " tx_first=%u tx_last=%u",
-			   txq->port_id, txq->queue_id, tx_id, tx_last);
+		PMD_TX_LOG(DEBUG,
+			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
+			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
 
-		if (nb_used > txq->nb_free) {
+		if (nb_desc_required > txq->nb_free) {
 			if (iavf_xmit_cleanup(txq)) {
-				if (nb_tx == 0)
+				if (idx == 0)
 					return 0;
 				goto end_of_tx;
 			}
-			if (unlikely(nb_used > txq->rs_thresh)) {
-				while (nb_used > txq->nb_free) {
+			if (unlikely(nb_desc_required > txq->rs_thresh)) {
+				while (nb_desc_required > txq->nb_free) {
 					if (iavf_xmit_cleanup(txq)) {
-						if (nb_tx == 0)
+						if (idx == 0)
 							return 0;
 						goto end_of_tx;
 					}
@@ -2259,122 +2375,94 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 		}
 
-		/* Descriptor based VLAN insertion */
-		if (ol_flags & RTE_MBUF_F_TX_VLAN &&
-		    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
-			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
-			td_tag = tx_pkt->vlan_tci;
-		}
-
-		/* According to datasheet, the bit2 is reserved and must be
-		 * set to 1.
-		 */
-		td_cmd |= 0x04;
-
-		/* Enable checksum offloading */
-		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
-			iavf_txd_enable_checksum(ol_flags, &td_cmd,
-						&td_offset, tx_offload);
+		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
 
-		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
-			uint64_t cd_type_cmd_tso_mss =
-				IAVF_TX_DESC_DTYPE_CONTEXT;
-			volatile struct iavf_tx_context_desc *ctx_txd =
+		if (nb_desc_ctx) {
+			volatile struct iavf_tx_context_desc *ctx_desc =
 				(volatile struct iavf_tx_context_desc *)
-							&txr[tx_id];
+					&txr[desc_idx];
 
 			/* clear QW0 or the previous writeback value
 			 * may impact next write
 			 */
-			*(volatile uint64_t *)ctx_txd = 0;
+			*(volatile uint64_t *)ctx_desc = 0;
 
-			txn = &sw_ring[txe->next_id];
+			txn = &txe_ring[txe->next_id];
 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
 			if (txe->mbuf) {
 				rte_pktmbuf_free_seg(txe->mbuf);
 				txe->mbuf = NULL;
 			}
 
-			/* TSO enabled */
-			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
-				cd_type_cmd_tso_mss |=
-					iavf_set_tso_ctx(tx_pkt, tx_offload);
+			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
-			if (ol_flags & RTE_MBUF_F_TX_VLAN &&
-			    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
-				cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
-					<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-				cd_l2tag2 = tx_pkt->vlan_tci;
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
 			}
 
-			ctx_txd->type_cmd_tso_mss =
-				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
-			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
 
-			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
-		}
 
-		m_seg = tx_pkt;
+		mb_seg = mb;
+
 		do {
-			txd = &txr[tx_id];
-			txn = &sw_ring[txe->next_id];
+			ddesc = (volatile struct iavf_tx_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
 			if (txe->mbuf)
 				rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = m_seg;
-
-			/* Setup TX Descriptor */
-			slen = m_seg->data_len;
-			buf_dma_addr = rte_mbuf_data_iova(m_seg);
-			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
-			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
-								  td_offset,
-								  slen,
-								  td_tag);
-
-			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
+
+			txe->mbuf = mb_seg;
+			iavf_fill_data_desc(ddesc, mb_seg,
+					ddesc_template, tlen, ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->next;
-		} while (m_seg);
+			mb_seg = mb_seg->next;
+		} while (mb_seg);
 
 		/* The last packet data descriptor needs End Of Packet (EOP) */
-		td_cmd |= IAVF_TX_DESC_CMD_EOP;
-		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
-		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
+
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
 
 		if (txq->nb_used >= txq->rs_thresh) {
 			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
 				   "%4u (port=%d queue=%d)",
-				   tx_last, txq->port_id, txq->queue_id);
+				   desc_idx_last, txq->port_id, txq->queue_id);
 
-			td_cmd |= IAVF_TX_DESC_CMD_RS;
+			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
 
 			/* Update txq RS bit counters */
 			txq->nb_used = 0;
 		}
 
-		txd->cmd_type_offset_bsz |=
-			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
-					 IAVF_TXD_QW1_CMD_SHIFT);
-		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
+				IAVF_TXD_DATA_QW1_CMD_SHIFT);
+
+		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
 	}
 
 end_of_tx:
 	rte_wmb();
 
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   txq->port_id, txq->queue_id, tx_id, nb_tx);
+		   txq->port_id, txq->queue_id, desc_idx, idx);
 
-	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
-	txq->tx_tail = tx_id;
+	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
+	txq->tx_tail = desc_idx;
 
-	return nb_tx;
+	return idx;
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 84351011f1..1da1278452 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -403,6 +403,112 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+
+#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
+#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
+#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
+#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
+	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
+	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT		(48)
+#define IAVF_TXD_DATA_QW1_L2TAG1_MASK		\
+	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
+	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
+	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT		(30)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK		\
+	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK		\
+	(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT		(50)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK		\
+	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT		(0)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_external_ip_type {
+	IAVF_TX_CTX_DESC_EIPT_NONE,
+	IAVF_TX_CTX_DESC_EIPT_IPV6,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM			(32)
+#define IAVF_TXD_CTX_QW0_L2TAG2_MASK			(0xFFFFUL)
+
+
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)
+
+/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
+#define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
+
+
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
@@ -553,9 +659,10 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	const volatile struct iavf_tx_desc *tx_desc = desc;
 	enum iavf_tx_desc_dtype_value type;
 
-	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
-		tx_desc->cmd_type_offset_bsz &
-		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+
+	type = (enum iavf_tx_desc_dtype_value)
+		rte_le_to_cpu_64(tx_desc->cmd_type_offset_bsz &
+			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
 	switch (type) {
 	case IAVF_TX_DESC_DTYPE_DATA:
 		name = "Tx_data_desc";
@@ -569,8 +676,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	}
 
 	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
-	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
-	       tx_desc->cmd_type_offset_bsz);
+		txq->queue_id, name, tx_id, tx_desc->buffer_addr,
+		tx_desc->cmd_type_offset_bsz);
 }
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index d4f4d705b7..6d42ae9373 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -363,10 +363,12 @@ static inline void
 flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
 		     const uint32_t *type_table)
 {
-	const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+	const __m128i ptype_mask = _mm_set_epi16(
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
+
 	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
 	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
 	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
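
The IAVF_TXD_DATA_QW1_* shift/mask pairs added in iavf_rxtx.h compose the
data descriptor's second quad-word; condensed, the packing done by
iavf_build_data_desc_cmd_offset_fields() plus the buffer size fill amounts
to the following (helper name illustrative, little-endian byte swapping
omitted):

static inline uint64_t
iavf_pack_data_qw1(uint64_t cmd, uint64_t offset, uint64_t bufsz,
		uint64_t l2tag1)
{
	return IAVF_TX_DESC_DTYPE_DATA |
		((cmd << IAVF_TXD_DATA_QW1_CMD_SHIFT) &
				IAVF_TXD_DATA_QW1_CMD_MASK) |
		((offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
				IAVF_TXD_DATA_QW1_OFFSET_MASK) |
		((bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
				IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK) |
		((l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT) &
				IAVF_TXD_DATA_QW1_L2TAG1_MASK);
}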
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v12 3/7] net/iavf: add support for asynchronous virt channel messages
  2021-10-26 13:56 ` [dpdk-dev] [PATCH v12 " Radu Nicolau
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 1/7] common/iavf: " Radu Nicolau
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 2/7] net/iavf: rework tx path Radu Nicolau
@ 2021-10-26 13:56   ` Radu Nicolau
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (5 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 13:56 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for asynchronous virtual channel messages, specifically for
inline IPsec messages.
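
The completion model can be sketched as follows: a synchronous command
expects a single response, while a command with an asynchronous virtchnl
reply expects two, and the waiter is only released once both arrive. A
minimal sketch using C11 atomics (the driver itself uses the rte_atomic32
helpers):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int pend_cmd_count;

static void cmd_issued(bool async_response)
{
	/* async commands complete in two steps: the immediate adminq ack
	 * and a later virtchnl message carrying the actual result
	 */
	atomic_store(&pend_cmd_count, async_response ? 2 : 1);
}

static bool cmd_response_received(void)
{
	/* true once the last expected response has arrived, i.e. the
	 * thread waiting in iavf_execute_vf_cmd() may be notified
	 */
	return atomic_fetch_sub(&pend_cmd_count, 1) == 1;
}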

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h       |  16 ++++
 drivers/net/iavf/iavf_vchnl.c | 138 +++++++++++++++++++++-------------
 2 files changed, 101 insertions(+), 53 deletions(-)
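
On the application side, the inline IPsec events forwarded below through
rte_eth_dev_callback_process() can be consumed with a regular ethdev
event callback, e.g. (illustrative handler, error handling trimmed):

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static int
ipsec_event_cb(uint16_t port_id, enum rte_eth_event_type type,
		void *cb_arg, void *ret_param)
{
	const struct rte_eth_event_ipsec_desc *desc = ret_param;

	(void)cb_arg;

	if (type == RTE_ETH_EVENT_IPSEC)
		printf("port %u: IPsec event subtype %d, metadata 0x%" PRIx64 "\n",
			port_id, (int)desc->subtype, desc->metadata);

	return 0;
}

static void
ipsec_event_register(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_IPSEC,
			ipsec_event_cb, NULL);
}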

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 12f541f539..efc90f9072 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -193,6 +193,7 @@ struct iavf_info {
 	uint64_t supported_rxdid;
 	uint8_t *proto_xtr; /* proto xtr type for all queues */
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+	rte_atomic32_t pend_cmd_count;
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
@@ -345,9 +346,24 @@ _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
+	rte_atomic32_set(&vf->pend_cmd_count, 1);
+
 	return !ret;
 }
 
+/* Check there is pending cmd in execution. If none, set new command. */
+static inline int
+_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+	int ret = rte_atomic32_cmpset(&vf->pend_cmd, VIRTCHNL_OP_UNKNOWN, ops);
+
+	if (!ret)
+		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+	rte_atomic32_set(&vf->pend_cmd_count, 2);
+
+	return !ret;
+}
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index bb65dbf04f..53d1506677 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -24,8 +24,8 @@
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
-#define MAX_TRY_TIMES 200
-#define ASQ_DELAY_MS  10
+#define MAX_TRY_TIMES 2000
+#define ASQ_DELAY_MS  1
 
 static uint32_t
 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
@@ -143,7 +143,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 }
 
 static int
-iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args,
+	int async)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
@@ -155,8 +156,14 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (vf->vf_reset)
 		return -EIO;
 
-	if (_atomic_set_cmd(vf, args->ops))
-		return -1;
+
+	if (async) {
+		if (_atomic_set_async_response_cmd(vf, args->ops))
+			return -1;
+	} else {
+		if (_atomic_set_cmd(vf, args->ops))
+			return -1;
+	}
 
 	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
@@ -252,9 +259,11 @@ static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
@@ -330,18 +339,40 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
 		case iavf_aqc_opc_send_msg_to_vf:
 			if (msg_opc == VIRTCHNL_OP_EVENT) {
 				iavf_handle_pf_event_msg(dev, info.msg_buf,
-							info.msg_len);
+						info.msg_len);
 			} else {
+				/* check for inline IPsec events */
+				struct inline_ipsec_msg *imsg =
+					(struct inline_ipsec_msg *)info.msg_buf;
+				struct rte_eth_event_ipsec_desc desc;
+				if (msg_opc == VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
+					&& imsg->ipsec_opcode ==
+						INLINE_IPSEC_OP_EVENT) {
+					struct virtchnl_ipsec_event *ev =
+							imsg->ipsec_data.event;
+					desc.subtype =
+						RTE_ETH_EVENT_IPSEC_UNKNOWN;
+					desc.metadata = ev->ipsec_event_data;
+					rte_eth_dev_callback_process(dev,
+							RTE_ETH_EVENT_IPSEC,
+							&desc);
+					return;
+				}
+
 				/* read message and it's expected one */
-				if (msg_opc == vf->pend_cmd)
-					_notify_cmd(vf, msg_ret);
-				else
-					PMD_DRV_LOG(ERR, "command mismatch,"
-						    "expect %u, get %u",
-						    vf->pend_cmd, msg_opc);
+				if (msg_opc == vf->pend_cmd) {
+					rte_atomic32_dec(&vf->pend_cmd_count);
+					if (rte_atomic32_read(
+						&vf->pend_cmd_count) == 0)
+						_notify_cmd(vf, msg_ret);
+				} else {
+					PMD_DRV_LOG(ERR,
+					"command mismatch, expect %u, get %u",
+						vf->pend_cmd, msg_opc);
+				}
 				PMD_DRV_LOG(DEBUG,
-					    "adminq response is received,"
-					    " opcode = %d", msg_opc);
+				"adminq response is received, opcode = %d",
+						msg_opc);
 			}
 			break;
 		default:
@@ -365,7 +396,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_ENABLE_VLAN_STRIPPING");
@@ -386,7 +417,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_DISABLE_VLAN_STRIPPING");
@@ -415,7 +446,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
 		return err;
@@ -468,12 +499,13 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
-		VIRTCHNL_VF_OFFLOAD_QOS;
+		VIRTCHNL_VF_OFFLOAD_QOS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -518,7 +550,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
@@ -562,7 +594,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_strip);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
@@ -602,7 +634,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_insert);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
@@ -645,7 +677,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(vlan_filter);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");
@@ -666,7 +698,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
@@ -697,7 +729,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES");
@@ -725,7 +757,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES");
@@ -758,7 +790,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
@@ -800,7 +832,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
@@ -844,7 +876,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
@@ -890,7 +922,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
@@ -922,7 +954,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
@@ -954,7 +986,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
@@ -1046,7 +1078,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
@@ -1087,7 +1119,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
 
@@ -1128,7 +1160,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
 
@@ -1188,7 +1220,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		args.in_args_size = len;
 		args.out_buffer = vf->aq_resp;
 		args.out_size = IAVF_AQ_BUF_SZ;
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		if (err)
 			PMD_DRV_LOG(ERR, "fail to execute command %s",
 				    add ? "OP_ADD_ETHER_ADDRESS" :
@@ -1215,7 +1247,7 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		*pstats = NULL;
@@ -1250,7 +1282,7 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1290,7 +1322,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
@@ -1317,7 +1349,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
@@ -1344,7 +1376,7 @@ iavf_fdir_add(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
 		return err;
@@ -1404,7 +1436,7 @@ iavf_fdir_del(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
 		return err;
@@ -1451,7 +1483,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
 		return err;
@@ -1492,7 +1524,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of %s",
@@ -1515,7 +1547,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
@@ -1541,7 +1573,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_SET_RSS_HENA");
@@ -1562,7 +1594,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1595,7 +1627,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_TC_MAP");
@@ -1640,7 +1672,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 		i * sizeof(struct virtchnl_ether_addr);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
@@ -1686,11 +1718,11 @@ iavf_request_queues(struct rte_eth_dev *dev, uint16_t num)
 		 * before iavf_read_msg_from_pf.
 		 */
 		rte_intr_disable(pci_dev->intr_handle);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_intr_enable(pci_dev->intr_handle);
 	} else {
 		rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
 				  iavf_dev_alarm_handler, dev);
 	}
@@ -1729,7 +1761,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
 		return err;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v12 4/7] net/iavf: add iAVF IPsec inline crypto support
  2021-10-26 13:56 ` [dpdk-dev] [PATCH v12 " Radu Nicolau
                     ` (2 preceding siblings ...)
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
@ 2021-10-26 13:56   ` Radu Nicolau
  2021-10-27  0:36     ` Zhang, Qi Z
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
                     ` (4 subsequent siblings)
  8 siblings, 1 reply; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 13:56 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Ray Kinsella
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload for
ESP over UDP, and in conjunction with TSO for UDP and TCP flows.
Implement support for rte_security packet metadata.

Add definitions for the IPsec descriptors, and extend the offload
support in the data and context descriptors to cover IPsec.

Add support to the virtual channel mailbox for IPsec Crypto request
operations. IPsec Crypto requests receive an initial acknowledgment
of receipt from the physical function driver, followed by an
asynchronous response with the success/failure of the request,
including any response data.

Add enhanced descriptor debugging.

Refactor the scalar Tx burst function to support integration of the offload.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h                       |   10 +
 drivers/net/iavf/iavf_ethdev.c                |   41 +-
 drivers/net/iavf/iavf_generic_flow.c          |   15 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1894 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  202 +-
 drivers/net/iavf/iavf_rxtx.h                  |  107 +-
 drivers/net/iavf/iavf_vchnl.c                 |   29 +
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 13 files changed, 2823 insertions(+), 27 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
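
For reference (not part of the diff below), here is a minimal sketch of how an
application could consume this offload through the generic rte_security API.
It is only illustrative: the helper names, port id, SPI, key, IV/digest sizes
and addresses are placeholders, it assumes a port bound to this PMD with
DEV_TX_OFFLOAD_SECURITY enabled, and it uses the two-mempool
rte_security_session_create() variant of the current API.

#include <rte_ethdev.h>
#include <rte_security.h>
#include <rte_crypto_sym.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Create an outbound inline-crypto ESP tunnel SA on the given port. */
static struct rte_security_session *
create_inline_esp_session(uint16_t port_id, struct rte_mempool *sess_mp,
			  struct rte_mempool *sess_priv_mp)
{
	/* security context registered by the PMD at device init */
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
	static uint8_t key[16]; /* placeholder AES-128-GCM key (all zero) */

	struct rte_crypto_sym_xform aead = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key, .length = sizeof(key) },
			.iv = { .offset = 0, .length = 12 },
			.digest_length = 16,
		},
	};
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x100, /* placeholder SPI */
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.tunnel = {
				.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
				.ipv4.dst_ip.s_addr =
					rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 1)),
			},
		},
		.crypto_xform = &aead,
	};

	/* ends up in iavf_ipsec_crypto_session_create() -> SA add virtchnl msg */
	return rte_security_session_create(ctx, &conf, sess_mp, sess_priv_mp);
}

/* Per packet on Tx: populate the PMD metadata dynfield and mark the mbuf. */
static inline void
prepare_tx_pkt(uint16_t port_id, struct rte_security_session *sess,
	       struct rte_mbuf *m)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);

	/* fills the iavf_ipsec_crypto_pkt_metadata dynfield from the session */
	rte_security_set_pkt_metadata(ctx, sess, m, NULL);
	m->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
}

No per-packet call is needed on Rx; inbound traffic is matched to the SA via an
rte_flow rule carrying the RTE_FLOW_ACTION_TYPE_SECURITY action, which the
generic flow changes below route to the new IPsec crypto flow engine.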

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index efc90f9072..6df31a649e 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -221,6 +221,7 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+	struct iavf_parser_list ipsec_crypto_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
@@ -245,6 +246,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_IPV6_FLOW,
 	IAVF_PROTO_XTR_TCP,
 	IAVF_PROTO_XTR_IP_OFFSET,
+	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
 	IAVF_PROTO_XTR_MAX,
 };
 
@@ -256,11 +258,14 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 };
 
+struct iavf_security_ctx;
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
 	struct rte_eth_dev_data *dev_data;
 	struct iavf_info vf;
+	struct iavf_security_ctx *security_ctx;
 
 	bool rx_bulk_alloc_allowed;
 	/* For vector PMD */
@@ -279,6 +284,8 @@ struct iavf_adapter {
 	(&((struct iavf_adapter *)adapter)->vf)
 #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct iavf_adapter *)adapter)->hw)
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+	(((struct iavf_adapter *)adapter)->security_ctx)
 
 /* IAVF_VSI_TO */
 #define IAVF_VSI_TO_HW(vsi) \
@@ -421,5 +428,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			uint16_t size);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index f892306f18..dba505494f 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -30,6 +30,7 @@
 #include "iavf_rxtx.h"
 #include "iavf_generic_flow.h"
 #include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
@@ -71,6 +72,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
 	[IAVF_PROTO_XTR_IP_OFFSET] = {
 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
 		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+		.param = {
+		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+		.ol_flag =
+			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
 };
 
 static int iavf_dev_configure(struct rte_eth_dev *dev);
@@ -922,6 +928,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	/* free the iAVF security device context and all related resources */
+	iavf_security_ctx_destroy(adapter);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -931,7 +940,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
@@ -973,6 +984,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+	}
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -1718,6 +1734,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
 		{ "tcp",       IAVF_PROTO_XTR_TCP       },
 		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
 	};
 	uint32_t i;
 
@@ -1726,8 +1743,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 			return xtr_type_map[i].type;
 	}
 
-	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
-		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+			"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
 
 	return -1;
 }
@@ -2375,6 +2392,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		goto flow_init_err;
 	}
 
+	/** Check if the IPsec Crypto offload is supported and create
+	 *  security_ctx if it is.
+	 */
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		/* Initialize security_ctx only for the primary process */
+		ret = iavf_security_ctx_create(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+			return ret;
+		}
+
+		ret = iavf_security_init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
+			return ret;
+		}
+	}
+
 	iavf_default_rss_disable(adapter);
 
 	return 0;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index 364904fa02..2befa125ac 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1766,6 +1766,7 @@ iavf_flow_init(struct iavf_adapter *ad)
 	TAILQ_INIT(&vf->flow_list);
 	TAILQ_INIT(&vf->rss_parser_list);
 	TAILQ_INIT(&vf->dist_parser_list);
+	TAILQ_INIT(&vf->ipsec_crypto_parser_list);
 	rte_spinlock_init(&vf->flow_ops_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
@@ -1840,6 +1841,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
+		list = &vf->ipsec_crypto_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
@@ -2149,6 +2153,13 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 
 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
 				    actions, error);
+	if (*engine)
+		return 0;
+
+	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
+			pattern, actions, error);
+	if (*engine)
+		return 0;
 
 	if (!*engine) {
 		rte_flow_error_set(error, EINVAL,
@@ -2195,6 +2206,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	/* Special case for inline crypto egress flows */
+	if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY)
+		goto free_flow;
+
 	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
 			&engine, iavf_parse_engine_create, error);
 	if (ret < 0) {
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index f2b54e1944..3681a96b31 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -464,6 +464,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 /* engine types. */
 enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_NONE = 0,
+	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
 	IAVF_FLOW_ENGINE_MAX,
@@ -477,6 +478,7 @@ enum iavf_flow_engine_type {
  */
 enum iavf_flow_classification_stage {
 	IAVF_FLOW_STAGE_NONE = 0,
+	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
 	IAVF_FLOW_STAGE_RSS,
 	IAVF_FLOW_STAGE_DISTRIBUTOR,
 	IAVF_FLOW_STAGE_MAX,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
new file mode 100644
index 0000000000..633fedf860
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -0,0 +1,1894 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+#include <rte_security_driver.h>
+#include <rte_security.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_log.h"
+#include "iavf_generic_flow.h"
+
+#include "iavf_ipsec_crypto.h"
+#include "iavf_ipsec_crypto_capabilities.h"
+
+/**
+ * iAVF IPsec Crypto Security Context
+ */
+struct iavf_security_ctx {
+	struct iavf_adapter *adapter;
+	int pkt_md_offset;
+	struct rte_cryptodev_capabilities *crypto_capabilities;
+};
+
+/**
+ * iAVF IPsec Crypto Security Session Parameters
+ */
+struct iavf_security_session {
+	struct iavf_adapter *adapter;
+
+	enum rte_security_ipsec_sa_mode mode;
+	enum rte_security_ipsec_tunnel_type type;
+	enum rte_security_ipsec_sa_direction direction;
+
+	struct {
+		uint32_t spi; /* Security Parameter Index */
+		uint32_t hw_idx; /* SA Index in hardware table */
+	} sa;
+
+	struct {
+		uint8_t enabled :1;
+		union {
+			uint64_t value;
+			struct {
+				uint32_t hi;
+				uint32_t low;
+			};
+		};
+	} esn;
+
+	struct {
+		uint8_t enabled :1;
+	} udp_encap;
+
+	size_t iv_sz;
+	size_t icv_sz;
+	size_t block_sz;
+
+	struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
+};
+/**
+ *  IV Length field in IPsec Tx Desc uses the following encoding:
+ *
+ *  0B - 0
+ *  4B - 1
+ *  8B - 2
+ *  16B - 3
+ *
+ * but we also need the IV length for TSO to correctly calculate the total
+ * header length, so it is also stored in the upper 6 bits for easy retrieval.
+ */
+static inline uint8_t
+calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
+{
+	uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
+
+	switch (iv_sz) {
+	case 4:
+		iv_length = IAVF_IPSEC_IV_LEN_DW;
+		break;
+	case 8:
+		iv_length = IAVF_IPSEC_IV_LEN_DDW;
+		break;
+	case 16:
+		iv_length = IAVF_IPSEC_IV_LEN_QDW;
+		break;
+	}
+
+	return (iv_sz << 2) | iv_length;
+}
+
+static unsigned int
+iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
+{
+	return sizeof(struct iavf_security_session);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_capability(struct iavf_security_ctx *iavf_sctx,
+	uint32_t algo, uint32_t type)
+{
+	const struct rte_cryptodev_capabilities *capability;
+	int i = 0;
+
+	capability = &iavf_sctx->crypto_capabilities[i];
+
+	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+			capability->sym.xform_type == type &&
+			capability->sym.cipher.algo == algo)
+			return &capability->sym;
+		/** try next capability */
+		capability = &iavf_sctx->crypto_capabilities[++i];
+	}
+
+	return NULL;
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_auth_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
+}
+static const struct rte_cryptodev_symmetric_capability *
+get_aead_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
+}
+
+static uint16_t
+get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->auth.block_size;
+}
+
+static uint8_t
+calc_context_desc_cipherblock_sz(size_t len)
+{
+	switch (len) {
+	case 8:
+		return 0x2;
+	case 16:
+		return 0x3;
+	default:
+		return 0x0;
+	}
+}
+
+static int
+valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
+{
+	if (len < min || len > max)
+		return false;
+
+	if (increment == 0)
+		return true;
+
+	if ((len - min) % increment)
+		return false;
+
+	/* make sure it fits in the key array */
+	if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
+		return false;
+
+	return true;
+}
+
+static int
+valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_auth_xform *auth)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, auth->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(auth->key.length,
+		capability->auth.key_size.min,
+		capability->auth.key_size.max,
+		capability->auth.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_cipher_xform *cipher)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, cipher->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(cipher->key.length,
+		capability->cipher.key_size.min,
+		capability->cipher.key_size.max,
+		capability->cipher.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_aead_xform *aead)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, aead->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(aead->key.length,
+		capability->aead.key_size.min,
+		capability->aead.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
+	struct rte_security_session_conf *conf)
+{
+	/** validate security action/protocol selection */
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
+		PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate IPsec protocol selection */
+	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate selected options */
+	if (conf->ipsec.options.copy_dscp ||
+		conf->ipsec.options.copy_flabel ||
+		conf->ipsec.options.copy_df ||
+		conf->ipsec.options.dec_ttl ||
+		conf->ipsec.options.ecn ||
+		conf->ipsec.options.stats) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+		return -EINVAL;
+	}
+
+	/**
+	 * Validate crypto xforms parameters.
+	 *
+	 * AEAD transforms can be used for either inbound or outbound IPsec SAs;
+	 * for non-AEAD crypto transforms we explicitly support only CIPHER/AUTH
+	 * chains for outbound and AUTH/CIPHER chains for inbound IPsec.
+	 */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_auth_xform(iavf_sctx,
+				&conf->crypto_xform->next->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->next->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_aead_xform *aead, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AEAD;
+
+	switch (aead->algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		cfg->algo_type = VIRTCHNL_AES_CCM; break;
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		cfg->algo_type = VIRTCHNL_AES_GCM; break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
+		break;
+	}
+
+	cfg->key_len = aead->key.length;
+	cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt len */
+	cfg->digest_len = aead->digest_length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, aead->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_cipher_xform *cipher, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_CIPHER;
+
+	switch (cipher->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cfg->algo_type = VIRTCHNL_AES_CBC; break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cfg->algo_type = VIRTCHNL_3DES_CBC; break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cfg->algo_type = VIRTCHNL_AES_CTR;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid cipher parameters");
+		break;
+	}
+
+	cfg->key_len = cipher->key.length;
+	cfg->iv_len = cipher->iv.length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_auth_xform *auth, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AUTH;
+
+	switch (auth->algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+		cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		cfg->algo_type = VIRTCHNL_AES_CMAC; break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		cfg->algo_type = VIRTCHNL_AES_GMAC;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid auth parameters");
+		break;
+	}
+
+	cfg->key_len = auth->key.length;
+	/* special case for RTE_CRYPTO_AUTH_AES_GMAC */
+	if (auth->algo == RTE_CRYPTO_AUTH_AES_GMAC)
+		cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt */
+	else
+		cfg->iv_len = auth->iv.length;
+	cfg->digest_len = auth->digest_length;
+
+	memcpy(cfg->key_data, auth->key.data, cfg->key_len);
+}
+
+/**
+ * Send SA add virtual channel request to Inline IPsec driver.
+ *
+ * Inline IPsec driver expects SPI and destination IP address to be in host
+ * order, but DPDK APIs are network order, therefore we need to do an htonl
+ * conversion of these parameters.
+ */
+static uint32_t
+iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
+	struct rte_security_session_conf *conf)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	struct virtchnl_ipsec_sa_cfg *sa_cfg;
+	size_t request_len, response_len;
+
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg);
+
+	request = rte_malloc("iavf-sad-add-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
+	response = rte_malloc("iavf-sad-add-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set SA configuration params */
+	sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
+
+	sa_cfg->spi = conf->ipsec.spi;
+	sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
+	sa_cfg->virtchnl_direction =
+		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+			VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
+
+	if (conf->ipsec.options.esn) {
+		sa_cfg->esn_enabled = 1;
+		sa_cfg->esn_hi = conf->ipsec.esn.hi;
+		sa_cfg->esn_low = conf->ipsec.esn.low;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sa_cfg->udp_encap_enabled = 1;
+
+	/* Set outer IP params */
+	if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
+
+		*((uint32_t *)sa_cfg->dst_addr)	=
+			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
+	} else {
+		uint32_t *v6_dst_addr =
+			conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
+
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
+
+		((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
+		((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
+		((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
+		((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
+	}
+
+	/* set crypto params */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->aead, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->cipher, conf->ipsec.salt);
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->auth, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->auth, conf->ipsec.salt);
+		if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
+			sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->cipher, conf->ipsec.salt);
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sa_cfg_resp->sa_handle;
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static void
+set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
+	struct iavf_security_session *sess)
+{
+	template->sa_idx = sess->sa.hw_idx;
+
+	if (sess->udp_encap.enabled)
+		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
+
+	if (sess->esn.enabled)
+		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
+
+	template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
+	template->ctx_desc_ipsec_params =
+			calc_context_desc_cipherblock_sz(sess->block_sz) |
+			((uint8_t)(sess->icv_sz >> 2) << 3);
+}
+
+static void
+set_session_parameter(struct iavf_security_ctx *iavf_sctx,
+	struct iavf_security_session *sess,
+	struct rte_security_session_conf *conf, uint32_t sa_idx)
+{
+	sess->adapter = iavf_sctx->adapter;
+
+	sess->mode = conf->ipsec.mode;
+	sess->direction = conf->ipsec.direction;
+
+	if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		sess->type = conf->ipsec.tunnel.type;
+
+	sess->sa.spi = conf->ipsec.spi;
+	sess->sa.hw_idx = sa_idx;
+
+	if (conf->ipsec.options.esn) {
+		sess->esn.enabled = 1;
+		sess->esn.value = conf->ipsec.esn.value;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sess->udp_encap.enabled = 1;
+
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sess->block_sz = get_aead_blocksize(iavf_sctx,
+			conf->crypto_xform->aead.algo);
+		sess->iv_sz = sizeof(uint64_t); /* iv.length includes salt */
+		sess->icv_sz = conf->crypto_xform->aead.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sess->block_sz = get_cipher_blocksize(iavf_sctx,
+			conf->crypto_xform->cipher.algo);
+		sess->iv_sz = conf->crypto_xform->cipher.iv.length;
+		sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			sess->block_sz = get_auth_blocksize(iavf_sctx,
+				conf->crypto_xform->auth.algo);
+			sess->iv_sz = conf->crypto_xform->auth.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		} else {
+			sess->block_sz = get_cipher_blocksize(iavf_sctx,
+				conf->crypto_xform->next->cipher.algo);
+			sess->iv_sz =
+				conf->crypto_xform->next->cipher.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		}
+	}
+
+	set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
+}
+
+/**
+ * Create IPsec Security Association for inline IPsec Crypto offload.
+ *
+ * 1. validate session configuration parameters
+ * 2. allocate session memory from mempool
+ * 3. add SA to hardware database
+ * 4. set session parameters
+ * 5. create packet metadata template for datapath
+ */
+static int
+iavf_ipsec_crypto_session_create(void *device,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *session,
+				 struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_session = NULL;
+	int sa_idx;
+	int ret = 0;
+
+	/* validate that all SA parameters are valid for device */
+	ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
+	if (ret)
+		return ret;
+
+	/* allocate session context */
+	if (rte_mempool_get(mempool, (void **)&iavf_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
+		return -ENOMEM;
+	}
+
+	/* add SA to hardware database */
+	sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
+	if (sa_idx < 0) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add SA (spi: %d, mode: %s, direction: %s)",
+			conf->ipsec.spi,
+			conf->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
+				"transport" : "tunnel",
+			conf->ipsec.direction ==
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+				"inbound" : "outbound");
+
+		rte_mempool_put(mempool, iavf_session);
+		return -EFAULT;
+	}
+
+	/* save data plane required session parameters */
+	set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
+
+	/* save to security session private data */
+	set_sec_session_private_data(session, iavf_session);
+
+	return 0;
+}
+
+/**
+ * Check if valid ipsec crypto action.
+ * SPI must be non-zero and SPI in session must match SPI value
+ * passed into function.
+ *
+ * returns: 0 if invalid session or SPI value equals zero
+ * returns: 1 if valid
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_session *sess = session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(sess == NULL || sess->adapter != adapter))
+		return false;
+
+	/* SPI value must be non-zero */
+	if (spi == 0)
+		return false;
+	/* Session SPI must match flow SPI */
+	else if (sess->sa.spi == spi) {
+		return true;
+		/**
+		 * TODO: We should add a way of tracking valid hw SA indices to
+		 * make validation less brittle
+		 */
+	}
+
+	return true;
+}
+
+/**
+ * Send virtual channel security policy add request to IES driver.
+ *
+ * IES driver expects SPI and destination IP address to be in host
+ * order, but DPDK APIs are network order, therefore we need to do an htonl
+ * conversion of these parameters.
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg);
+	request = rte_malloc("iavf-inbound-security-policy-add-request",
+				request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* ESP SPI */
+	request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
+
+	/* Destination IP  */
+	if (is_v4) {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
+		request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
+	} else {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+		request->ipsec_data.sp_cfg->dip[0] =
+				htonl(((uint32_t *)v6_dst_addr)[0]);
+		request->ipsec_data.sp_cfg->dip[1] =
+				htonl(((uint32_t *)v6_dst_addr)[1]);
+		request->ipsec_data.sp_cfg->dip[2] =
+				htonl(((uint32_t *)v6_dst_addr)[2]);
+		request->ipsec_data.sp_cfg->dip[3] =
+				htonl(((uint32_t *)v6_dst_addr)[3]);
+	}
+
+	request->ipsec_data.sp_cfg->drop = drop;
+
+	/** Traffic Class/Congestion Domain currently not supported */
+	request->ipsec_data.sp_cfg->set_tc = 0;
+	request->ipsec_data.sp_cfg->cgd = 0;
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
+	response = rte_malloc("iavf-inbound-security-policy-add-response",
+				response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* send virtual channel request to add security policy */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sp_cfg_resp->rule_id;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_update);
+	request = rte_malloc("iavf-sa-update-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sa-update-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set request params */
+	request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
+	request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
+
+	/* send virtual channel request to update SA in hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_resp->resp;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_update(void *device,
+		struct rte_security_session *session,
+		struct rte_security_session_conf *conf)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int rc = 0;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* update esn hi 32-bits */
+	if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
+		/**
+		 * Update ESN in hardware for inbound SA. Store in
+		 * iavf_security_session for outbound SA for use
+		 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
+		 */
+		if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+			rc = iavf_ipsec_crypto_sa_update_esn(adapter,
+					iavf_sess);
+		else
+			iavf_sess->esn.hi = conf->ipsec.esn.hi;
+	}
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
+		struct rte_security_session *session __rte_unused,
+		struct rte_security_stats *stats __rte_unused)
+{
+	return -EOPNOTSUPP;
+}
+
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_destroy);
+	request = rte_malloc("iavf-sp-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sp-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set security policy params */
+	request->ipsec_data.sp_destroy->table_id = is_v4 ?
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+	request->ipsec_data.sp_destroy->rule_id = flow_id;
+
+	/* send virtual channel request to delete security policy */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		return response->ipsec_data.ipsec_status->status;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_destroy);
+
+	request = rte_malloc("iavf-sa-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+
+	response = rte_malloc("iavf-sa-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/**
+	 * SA delete supports deletion of 1-8 specified SAs, or if the flag
+	 * field is zero, all SAs associated with the VF will be deleted.
+	 */
+	if (sess) {
+		request->ipsec_data.sa_destroy->flag = 0x1;
+		request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
+	} else {
+		request->ipsec_data.sa_destroy->flag = 0x0;
+	}
+
+	/* send virtual channel request to delete SA from hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+
+	/**
+	 * Delete status will be the same bitmask as the sa_destroy request flag
+	 * if the deletes were successful.
+	 */
+	if (request->ipsec_data.sa_destroy->flag !=
+			response->ipsec_data.ipsec_status->status)
+		rc = -EFAULT;
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_destroy(void *device,
+		struct rte_security_session *session)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int ret;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
+	rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
+	return ret;
+}
+
+/**
+ * Get ESP trailer from packet as well as calculate the total ESP trailer
+ * length, which include padding, ESP trailer footer and the ICV
+ */
+static inline struct rte_esp_tail *
+iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t *esp_trailer_length)
+{
+	struct rte_esp_tail *esp_trailer;
+
+	uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
+	uint16_t offset = 0;
+
+	/**
+	 * The ICV will not be present in TSO packets as this is appended by
+	 * hardware during segment generation
+	 */
+	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
+		length -=  s->icv_sz;
+
+	*esp_trailer_length = length;
+
+	/**
+	 * Calculate the offset in the packet to the ESP trailer header; this
+	 * should be the total packet length less the size of the ESP trailer
+	 * plus the ICV length if it is present.
+	 */
+	offset = rte_pktmbuf_pkt_len(m) - length;
+
+	if (m->nb_segs > 1) {
+		/* find segment which esp trailer is located */
+		while (m->data_len < offset) {
+			offset -= m->data_len;
+			m = m->next;
+		}
+	}
+
+	esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
+
+	*esp_trailer_length += esp_trailer->pad_len;
+
+	return esp_trailer;
+}
+
+static inline uint16_t
+iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t esp_tlen)
+{
+	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
+	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
+	uint16_t ol4_len = 0;		/* UDP NATT */
+	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
+	uint16_t l4_len = 0;		/* TCP/UDP/SCTP hdrs */
+	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
+
+	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ol3_len = m->outer_l3_len;
+		/**<
+		 * application provided l3len assumed to include length of
+		 * ipv4/6 hdr + ext hdrs
+		 */
+
+	if (s->udp_encap.enabled)
+		ol4_len = sizeof(struct rte_udp_hdr);
+
+	l3_len = m->l3_len;
+	l4_len = m->l4_len;
+
+	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
+			esp_hlen + l3_len + l4_len + esp_tlen);
+}
+
+static int
+iavf_ipsec_crypto_pkt_metadata_set(void *device,
+			 struct rte_security_session *session,
+			 struct rte_mbuf *m, void *params)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_sess = session->sess_private_data;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
+	struct rte_esp_tail *esp_tail;
+	uint64_t *sqn = params;
+	uint16_t esp_trailer_length;
+
+	/* Check we have a valid session associated with this device */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* Get dynamic metadata location from mbuf */
+	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
+		struct iavf_ipsec_crypto_pkt_metadata *);
+
+	/* Set immutable metadata values from session template */
+	memcpy(md, &iavf_sess->pkt_metadata_template,
+		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
+
+	esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
+			&esp_trailer_length);
+
+	/* Set per packet mutable metadata values */
+	md->esp_trailer_len = esp_trailer_length;
+	md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
+				iavf_sess, esp_trailer_length);
+	md->next_proto = esp_tail->next_proto;
+
+	/* If Extended SN in use set the upper 32-bits in metadata */
+	if (iavf_sess->esn.enabled && sqn != NULL)
+		md->esn = (uint32_t)(*sqn >> 32);
+
+	return 0;
+}
+
+static int
+iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
+		struct virtchnl_ipsec_cap *capability)
+{
+	/* Perform pf-vf comms */
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg);
+
+	request = rte_malloc("iavf-device-capability-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_cap);
+	response = rte_malloc("iavf-device-capability-response",
+			response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* send virtual channel request to get device IPsec capabilities */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id) {
+		rc = -EFAULT;
+		goto update_cleanup;
+	}
+	memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+enum rte_crypto_auth_algorithm auth_maptbl[] = {
+	/* Hash Algorithm */
+	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
+	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
+	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
+	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
+	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
+	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
+	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
+	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
+	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
+	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+};
+
+static void
+update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
+		struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	capability->auth.algo = auth_maptbl[acap->algo_type];
+	capability->auth.block_size = acap->block_size;
+
+	capability->auth.key_size.min = acap->min_key_size;
+	capability->auth.key_size.max = acap->max_key_size;
+	capability->auth.key_size.increment = acap->inc_key_size;
+
+	capability->auth.digest_size.min = acap->min_digest_size;
+	capability->auth.digest_size.max = acap->max_digest_size;
+	capability->auth.digest_size.increment = acap->inc_digest_size;
+}
+
+enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
+	/* Cipher Algorithm */
+	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
+	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
+	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
+	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
+};
+
+static void
+update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+	capability->cipher.algo = cipher_maptbl[acap->algo_type];
+
+	capability->cipher.block_size = acap->block_size;
+
+	capability->cipher.key_size.min = acap->min_key_size;
+	capability->cipher.key_size.max = acap->max_key_size;
+	capability->cipher.key_size.increment = acap->inc_key_size;
+
+	capability->cipher.iv_size.min = acap->min_iv_size;
+	capability->cipher.iv_size.max = acap->max_iv_size;
+	capability->cipher.iv_size.increment = acap->inc_iv_size;
+}
+
+enum rte_crypto_aead_algorithm aead_maptbl[] = {
+	/* AEAD Algorithm */
+	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
+	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
+	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+};
+
+static void
+update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	capability->aead.algo = aead_maptbl[acap->algo_type];
+
+	capability->aead.block_size = acap->block_size;
+
+	capability->aead.key_size.min = acap->min_key_size;
+	capability->aead.key_size.max = acap->max_key_size;
+	capability->aead.key_size.increment = acap->inc_key_size;
+
+	capability->aead.aad_size.min = acap->min_aad_size;
+	capability->aead.aad_size.max = acap->max_aad_size;
+	capability->aead.aad_size.increment = acap->inc_aad_size;
+
+	capability->aead.iv_size.min = acap->min_iv_size;
+	capability->aead.iv_size.max = acap->max_iv_size;
+	capability->aead.iv_size.increment = acap->inc_iv_size;
+
+	capability->aead.digest_size.min = acap->min_digest_size;
+	capability->aead.digest_size.max = acap->max_digest_size;
+	capability->aead.digest_size.increment = acap->inc_digest_size;
+}
+
+/**
+ * Dynamically set crypto capabilities based on virtchannel IPsec
+ * capabilities structure.
+ */
+int
+iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
+{
+	struct rte_cryptodev_capabilities *capabilities;
+	int i, j, number_of_capabilities = 0, ci = 0;
+
+	/* Count the total number of crypto algorithms supported */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
+		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
+
+	/**
+	 * Allocate cryptodev capabilities structure for
+	 * *number_of_capabilities* items plus one item to null terminate the
+	 * array
+	 */
+	capabilities = rte_zmalloc("crypto_cap",
+		sizeof(struct rte_cryptodev_capabilities) *
+		(number_of_capabilities + 1), 0);
+	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
+
+	/**
+	 * Iterate over each virtchnl crypto capability by crypto type and
+	 * algorithm.
+	 */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
+		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
+			switch (vch_cap->cap[i].crypto_type) {
+			case VIRTCHNL_AUTH:
+				update_auth_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_CIPHER:
+				update_cipher_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_AEAD:
+				update_aead_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			default:
+				capabilities[ci].op =
+						RTE_CRYPTO_OP_TYPE_UNDEFINED;
+				break;
+			}
+		}
+	}
+
+	iavf_sctx->crypto_capabilities = capabilities;
+	return 0;
+}
+
+/**
+ * Get security capabilities for device
+ */
+static const struct rte_security_capability *
+iavf_ipsec_crypto_capabilities_get(void *device)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	unsigned int i;
+
+	static struct rte_security_capability iavf_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 }
+			},
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	/**
+	 * Update the security capabilities struct with the runtime discovered
+	 * crypto capabilities, except for the last element of the array which
+	 * is the null termination
+	 */
+	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
+			sizeof(iavf_security_capabilities[0])) - 1); i++) {
+		iavf_security_capabilities[i].crypto_capabilities =
+			iavf_sctx->crypto_capabilities;
+	}
+
+	return iavf_security_capabilities;
+}
+
+static struct rte_security_ops iavf_ipsec_crypto_ops = {
+	.session_get_size		= iavf_ipsec_crypto_session_size_get,
+	.session_create			= iavf_ipsec_crypto_session_create,
+	.session_update			= iavf_ipsec_crypto_session_update,
+	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
+	.session_destroy		= iavf_ipsec_crypto_session_destroy,
+	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
+	.get_userdata			= NULL,
+	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
+};
+
+int
+iavf_security_ctx_create(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx;
+
+	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
+	if (sctx == NULL)
+		return -ENOMEM;
+
+	sctx->device = adapter->vf.eth_dev;
+	sctx->ops = &iavf_ipsec_crypto_ops;
+	sctx->sess_cnt = 0;
+
+	adapter->vf.eth_dev->security_ctx = sctx;
+
+	if (adapter->security_ctx == NULL) {
+		adapter->security_ctx = rte_malloc("iavf_security_ctx",
+				sizeof(struct iavf_security_ctx), 0);
+		if (adapter->security_ctx == NULL)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int
+iavf_security_init(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct rte_mbuf_dynfield pkt_md_dynfield = {
+		.name = "iavf_ipsec_crypto_pkt_metadata",
+		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
+		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
+	};
+	struct virtchnl_ipsec_cap capabilities;
+	int rc;
+
+	iavf_sctx->adapter = adapter;
+
+	iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
+	if (iavf_sctx->pkt_md_offset < 0)
+		return iavf_sctx->pkt_md_offset;
+
+	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
+	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
+	if (rc)
+		return rc;
+
+	return	iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
+			&capabilities);
+}
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	return iavf_sctx->pkt_md_offset;
+}
+
+int
+iavf_security_ctx_destroy(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx  = adapter->vf.eth_dev->security_ctx;
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	if (iavf_sctx == NULL)
+		return -ENODEV;
+
+	/* TODO: Add resources cleanup */
+
+	/* free and reset security data structures */
+	rte_free(iavf_sctx);
+	rte_free(sctx);
+
+	iavf_sctx = NULL;
+	sctx = NULL;
+
+	return 0;
+}
+
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
+{
+	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
+
+	/** Capability check for IPsec Crypto */
+	if (resources && (resources->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
+		return true;
+
+	return false;
+}
+
+#define IAVF_IPSEC_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
+enum iavf_ipsec_flow_pt_type {
+	IAVF_PATTERN_ESP = 1,
+	IAVF_PATTERN_AH,
+	IAVF_PATTERN_UDP_ESP,
+};
+enum iavf_ipsec_flow_pt_ip_ver {
+	IAVF_PATTERN_IPV4 = 1,
+	IAVF_PATTERN_IPV6,
+};
+
+#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
+#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
+#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
+
+static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
+	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
+};
+
+struct iavf_ipsec_flow_item {
+	uint64_t id;
+	uint8_t is_ipv4;
+	uint32_t spi;
+	struct rte_ether_hdr eth_hdr;
+	union {
+		struct rte_ipv4_hdr ipv4_hdr;
+		struct rte_ipv6_hdr ipv6_hdr;
+	};
+	struct rte_udp_hdr udp_hdr;
+};
+
+static void
+parse_eth_item(const struct rte_flow_item_eth *item,
+		struct rte_ether_hdr *eth)
+{
+	memcpy(eth->src_addr.addr_bytes,
+			item->src.addr_bytes, sizeof(eth->src_addr));
+	memcpy(eth->dst_addr.addr_bytes,
+			item->dst.addr_bytes, sizeof(eth->dst_addr));
+}
+
+static void
+parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
+		struct rte_ipv4_hdr *ipv4)
+{
+	ipv4->src_addr = item->hdr.src_addr;
+	ipv4->dst_addr = item->hdr.dst_addr;
+}
+
+static void
+parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
+		struct rte_ipv6_hdr *ipv6)
+{
+	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
+	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+}
+
+static void
+parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
+{
+	udp->dst_port = item->hdr.dst_port;
+	udp->src_port = item->hdr.src_port;
+}
+
+static int
+has_security_action(const struct rte_flow_action actions[],
+	const void **session)
+{
+	/* only {SECURITY; END} supported */
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
+		*session = actions[0].conf;
+		return true;
+	}
+	return false;
+}
+
+static struct iavf_ipsec_flow_item *
+iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t type)
+{
+	const void *session;
+	struct iavf_ipsec_flow_item
+		*ipsec_flow = rte_malloc("security-flow-rule",
+		sizeof(struct iavf_ipsec_flow_item), 0);
+	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
+	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
+
+	if (ipsec_flow == NULL)
+		return NULL;
+
+	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
+
+	if (pattern[0].spec)
+		parse_eth_item((const struct rte_flow_item_eth *)
+				pattern[0].spec, &ipsec_flow->eth_hdr);
+
+	switch (p_type) {
+	case IAVF_PATTERN_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[2].spec)->hdr.spi;
+		break;
+	case IAVF_PATTERN_AH:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_ah *)
+					pattern[2].spec)->spi;
+		break;
+	case IAVF_PATTERN_UDP_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		parse_udp_item((const struct rte_flow_item_udp *)
+				pattern[2].spec,
+			&ipsec_flow->udp_hdr);
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[3].spec)->hdr.spi;
+		break;
+	default:
+		goto flow_cleanup;
+	}
+
+	if (!has_security_action(actions, &session))
+		goto flow_cleanup;
+
+	if (!iavf_ipsec_crypto_action_valid(ethdev, session,
+			ipsec_flow->spi))
+		goto flow_cleanup;
+
+	return ipsec_flow;
+
+flow_cleanup:
+	rte_free(ipsec_flow);
+	return NULL;
+}
+
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser;
+
+static int
+iavf_ipsec_flow_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
+		parser = &iavf_ipsec_flow_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
+}
+
+static int
+iavf_ipsec_flow_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = meta;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	if (ipsec_flow->is_ipv4) {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			1,
+			ipsec_flow->ipv4_hdr.dst_addr,
+			NULL,
+			0);
+	} else {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			0,
+			0,
+			ipsec_flow->ipv6_hdr.dst_addr,
+			0);
+	}
+
+	if (ipsec_flow->id < 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Failed to add SA.");
+		return -rte_errno;
+	}
+
+	flow->rule = ipsec_flow;
+
+	return 0;
+}
+
+static int
+iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	iavf_ipsec_crypto_security_policy_delete(ad,
+			ipsec_flow->is_ipv4, ipsec_flow->id);
+	rte_free(ipsec_flow);
+	return 0;
+}
+
+static struct iavf_flow_engine iavf_ipsec_flow_engine = {
+	.init = iavf_ipsec_flow_init,
+	.uninit = iavf_ipsec_flow_uninit,
+	.create = iavf_ipsec_flow_create,
+	.destroy = iavf_ipsec_flow_destroy,
+	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
+};
+
+static int
+iavf_ipsec_flow_parse(struct iavf_adapter *ad,
+		       struct iavf_pattern_match_item *array,
+		       uint32_t array_len,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta,
+		       struct rte_flow_error *error)
+{
+	struct iavf_pattern_match_item *item = NULL;
+	int ret = -1;
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (item && item->meta) {
+		uint32_t type = (uint64_t)(item->meta);
+		struct iavf_ipsec_flow_item *fi =
+				iavf_ipsec_flow_item_parse(ad->vf.eth_dev,
+						pattern, actions, type);
+		if (fi && meta) {
+			*meta = fi;
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser = {
+	.engine = &iavf_ipsec_flow_engine,
+	.array = iavf_ipsec_flow_pattern,
+	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
+	.parse_pattern_action = iavf_ipsec_flow_parse,
+	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
+};
+
+RTE_INIT(iavf_ipsec_flow_engine_register)
+{
+	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
+}
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
new file mode 100644
index 0000000000..4e4c8798ec
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_H_
+#define _IAVF_IPSEC_CRYPTO_H_
+
+#include <rte_security.h>
+
+#include "iavf.h"
+
+
+
+struct iavf_tx_ipsec_desc {
+	union {
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le16 l4payload_length;
+			__le32 esn;
+			__le16 trailer_length;
+			u8 type:4;
+			u8 rsv:1;
+			u8 udp:1;
+			u8 ivlen:2;
+			u8 next_header;
+			__le16 ipv6_ext_hdr_length;
+			__le32 said;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
+			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
+
+/* Initialization Vector Length type */
+enum iavf_ipsec_iv_len {
+	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
+	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
+	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
+	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
+};
+
+
+/* IPsec Crypto Packet Metadata offload flags */
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
+
+/**
+ * Packet metadata structure used to hold parameters required by the iAVF
+ * transmit data path. Parameters are set for the session by calling the
+ * rte_security_set_pkt_metadata() API.
+ */
+struct iavf_ipsec_crypto_pkt_metadata {
+	uint32_t sa_idx;                /* SA hardware index (20b/4B) */
+
+	uint8_t ol_flags;		/* flags (1B) */
+	uint8_t len_iv;			/* IV length (2b/1B) */
+	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
+	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
+
+	uint16_t l4_payload_len;	/* L4 payload length */
+	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extension headers len (5b/1B) */
+	uint8_t next_proto;		/* Next Protocol (8b/1B) */
+
+	uint32_t esn;		        /* Extended Sequence Number (32b/4B) */
+} __rte_packed;
+
+/**
+ * Inline IPsec Crypto offload is supported
+ */
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
+
+/**
+ * Create security context
+ */
+int iavf_security_ctx_create(struct iavf_adapter *adapter);
+
+/**
+ * Initialize security context
+ */
+int iavf_security_init(struct iavf_adapter *adapter);
+
+/**
+ * Set security capabilities
+ */
+int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchnl_capabilities);
+
+
+int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+/**
+ * Destroy security context
+ */
+int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
+
+/**
+ * Verify that the inline IPsec Crypto action is valid for this device
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi);
+
+/**
+ * Add inbound security policy rule to hardware
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop);
+
+/**
+ * Delete inbound security policy rule from hardware
+ */
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id);
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+#endif /* _IAVF_IPSEC_CRYPTO_H_ */
diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
new file mode 100644
index 0000000000..70ce8dd638
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC MAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 },
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* ChaCha20-Poly1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 16,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	}
+};
+
+
+#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 128691aaf1..80438f9f8a 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -27,6 +27,7 @@
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
+#include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
@@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 uint8_t
 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
@@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
 		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
 		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
 		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
+				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
 	};
 
 	return flex_type < RTE_DIM(rxdid_map) ?
@@ -508,6 +512,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields =
 			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
+	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+		rxq->xtr_ol_flag =
+			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+		rxq->rxd_to_pkt_fields =
+			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
 	case IAVF_RXDID_COMMS_OVS_1:
 		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
 		break;
@@ -692,6 +702,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		       const struct rte_eth_txconf *tx_conf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
@@ -736,9 +748,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		struct virtchnl_vlan_supported_caps *insertion_support =
-			&vf->vlan_v2_caps.offloads.insertion_support;
+			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
 		uint32_t insertion_cap;
 
 		if (insertion_support->outer)
@@ -762,6 +774,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
+	if (iavf_ipsec_crypto_supported(adapter))
+		txq->ipsec_crypto_pkt_md_offset =
+			iavf_security_get_pkt_md_offset(adapter);
+
 	/* Allocate software ring */
 	txq->sw_ring =
 		rte_zmalloc_socket("iavf tx sw ring",
@@ -1081,6 +1097,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
 #endif
 }
 
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
+
+	mb->dynfield1[0] = desc->ipsec_said &
+			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
+}
+
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp,
+			  struct iavf_ipsec_crypto_stats *stats)
+{
+	uint16_t status1 = rte_le_to_cpu_16(rxdp->wb.status_error1);
+
+	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
+		uint16_t ipsec_status;
+
+		mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
+
+		ipsec_status = status1 &
+			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
+
+
+		if (unlikely(ipsec_status !=
+			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
+			mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
+
+			switch (ipsec_status) {
+			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
+				stats->ierrors.sad_miss++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
+				stats->ierrors.not_processed++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
+				stats->ierrors.icv_check++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
+				stats->ierrors.ipsec_length++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
+				stats->ierrors.misc++;
+				break;
+			}
+
+			stats->ierrors.count++;
+			return;
+		}
+
+		stats->icount++;
+		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
+
+		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
+			ipsec_status !=
+				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
+			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
+	}
+}
+
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
@@ -1399,6 +1479,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1541,6 +1623,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1779,6 +1863,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+				&rxq->stats.ipsec_crypto);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2091,6 +2177,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
 	*field |= cmd;
 }
 
+static inline void
+iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+{
+	uint64_t ipsec_field =
+		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+
+	*field |= ipsec_field;
+}
+
+
 static inline void
 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 		const struct rte_mbuf *m)
@@ -2123,15 +2221,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 
 static inline uint16_t
 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
-	struct rte_mbuf *m)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
 	uint64_t segmentation_field = 0;
 	uint64_t total_length = 0;
 
-	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+	if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+		total_length = ipsec_md->l4_payload_len;
+	} else {
+		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
-	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
-		total_length -= m->outer_l3_len;
+		if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+			total_length -= m->outer_l3_len;
+	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
 	if (!m->l4_len || !m->tso_segsz)
@@ -2160,7 +2262,8 @@ struct iavf_tx_context_desc_qws {
 
 static inline void
 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
-	struct rte_mbuf *m, uint16_t *tlen)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
+	uint16_t *tlen)
 {
 	volatile struct iavf_tx_context_desc_qws *desc_qws =
 			(volatile struct iavf_tx_context_desc_qws *)desc;
@@ -2172,8 +2275,13 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 
 	/* fill segmentation field */
 	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
+		/* fill IPsec field */
+		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
+			iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
+				ipsec_md);
+
 		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
-				m);
+				m, ipsec_md);
 	}
 
 	/* fill tunnelling field */
@@ -2187,6 +2295,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 }
 
 
+static inline void
+iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
+	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
+{
+	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
+		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
+		((uint64_t)md->esp_trailer_len <<
+				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
+
+	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
+		((uint64_t)md->next_proto <<
+				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
+		((uint64_t)(md->len_iv & 0x3) <<
+				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
+		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+				1ULL : 0ULL) <<
+				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
+		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+
+	/**
+	 * TODO: Pre-calculate this in the Session initialization
+	 *
+	 * Calculate IPsec length required in data descriptor func when TSO
+	 * offload is enabled
+	 */
+	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
+			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+			sizeof(struct rte_udp_hdr) : 0);
+}
+
 static inline void
 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		struct rte_mbuf *m)
@@ -2298,6 +2438,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
 }
 
 
+static struct iavf_ipsec_crypto_pkt_metadata *
+iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
+		struct rte_mbuf *m)
+{
+	if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
+		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
+				struct iavf_ipsec_crypto_pkt_metadata *);
+
+	return NULL;
+}
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -2326,7 +2477,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	for (idx = 0; idx < nb_pkts; idx++) {
 		volatile struct iavf_tx_desc *ddesc;
-		uint16_t nb_desc_ctx;
+		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
+
+		uint16_t nb_desc_ctx, nb_desc_ipsec;
 		uint16_t nb_desc_data, nb_desc_required;
 		uint16_t tlen = 0, ipseclen = 0;
 		uint64_t ddesc_template = 0;
@@ -2336,16 +2489,23 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+		/**
+		 * Get metadata for ipsec crypto from mbuf dynamic fields if
+		 * security offload is specified.
+		 */
+		ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);
+
 		nb_desc_data = mb->nb_segs;
 		nb_desc_ctx = !!(mb->ol_flags &
 			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG | RTE_MBUF_F_TX_TUNNEL_MASK));
+		nb_desc_ipsec = !!(mb->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
 
 		/**
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx;
+		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2396,7 +2556,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txe->mbuf = NULL;
 			}
 
-			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
 			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
 			txe->last_id = desc_idx_last;
@@ -2404,7 +2564,27 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe = txn;
 			}
 
+		if (nb_desc_ipsec) {
+			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
+				(volatile struct iavf_tx_ipsec_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
+			if (txe->mbuf) {
+				rte_pktmbuf_free_seg(txe->mbuf);
+				txe->mbuf = NULL;
+			}
+
+			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
+		}
 
 		mb_seg = mb;
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 1da1278452..b88c81f8f6 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -25,7 +25,8 @@
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
-		RTE_ETH_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |		 \
+		RTE_ETH_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
 		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		 \
@@ -36,10 +37,10 @@
 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 
 #define IAVF_RX_VECTOR_OFFLOAD (				 \
-		RTE_ETH_RX_OFFLOAD_CHECKSUM |		 \
-		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		 \
-		RTE_ETH_RX_OFFLOAD_VLAN |		 \
-		RTE_ETH_RX_OFFLOAD_RSS_HASH)
+		DEV_RX_OFFLOAD_CHECKSUM |		 \
+		DEV_RX_OFFLOAD_SCTP_CKSUM |		 \
+		DEV_RX_OFFLOAD_VLAN |		 \
+		DEV_RX_OFFLOAD_RSS_HASH)
 
 #define IAVF_VECTOR_PATH 0
 #define IAVF_VECTOR_OFFLOAD_PATH 1
@@ -47,23 +48,26 @@
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          88
+#define IAVF_MIN_TSO_MSS          256
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
 
-#define IAVF_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM |		 \
+#define IAVF_TX_CKSUM_OFFLOAD_MASK (		 \
+		RTE_MBUF_F_TX_IP_CKSUM |		 \
 		RTE_MBUF_F_TX_L4_MASK |		 \
 		RTE_MBUF_F_TX_TCP_SEG)
 
-#define IAVF_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 |		 \
+#define IAVF_TX_OFFLOAD_MASK (  \
+		RTE_MBUF_F_TX_OUTER_IPV6 |		 \
 		RTE_MBUF_F_TX_OUTER_IPV4 |		 \
 		RTE_MBUF_F_TX_IPV6 |			 \
 		RTE_MBUF_F_TX_IPV4 |			 \
 		RTE_MBUF_F_TX_VLAN |		 \
 		RTE_MBUF_F_TX_IP_CKSUM |		 \
 		RTE_MBUF_F_TX_L4_MASK |		 \
-		RTE_MBUF_F_TX_TCP_SEG)
+		RTE_MBUF_F_TX_TCP_SEG |		 \
+		RTE_MBUF_F_TX_SEC_OFFLOAD)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
@@ -161,6 +165,24 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_rx_queue_stats {
+	uint64_t reserved;
+	struct iavf_ipsec_crypto_stats ipsec_crypto;
+};
+
 /* Structure associated with each Rx queue. */
 struct iavf_rx_queue {
 	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
@@ -209,6 +231,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 				/* handle flexible descriptor by RXDID */
+	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
 };
 
@@ -243,6 +266,7 @@ struct iavf_tx_queue {
 	uint64_t offloads;
 	uint16_t next_dd;              /* next to set RS, for VPMD */
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
+	uint16_t ipsec_crypto_pkt_md_offset;
 
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
@@ -345,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
 	} flex_ts;
 };
 
+/* Rx Flex Descriptor
+ * RxDID Profile ID 24 Inline IPsec
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: Inline IPsec SAID lower 16-bits
+ * Flex-field 5: Inline IPsec SAID upper 16-bits
+ */
+struct iavf_32b_rx_flex_desc_comms_ipsec {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	__le32 ipsec_said;
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -364,6 +422,7 @@ enum iavf_rxdid {
 	IAVF_RXDID_COMMS_AUX_TCP	= 21,
 	IAVF_RXDID_COMMS_OVS_1		= 22,
 	IAVF_RXDID_COMMS_OVS_2		= 23,
+	IAVF_RXDID_COMMS_IPSEC_CRYPTO	= 24,
 	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
 	IAVF_RXDID_LAST			= 63,
 };
@@ -391,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
 
 enum iavf_rx_flex_desc_status_error_1_bits {
 	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
 	/* [10:6] reserved */
 	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
 	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
@@ -403,6 +466,23 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
+
+enum iavf_rx_flex_desc_ipsec_crypto_status {
+	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
+	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
+	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
+	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
+	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
+	/* Reserved */
+	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
+};
+
+
 
 #define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
 #define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
@@ -670,6 +750,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	case IAVF_TX_DESC_DTYPE_CONTEXT:
 		name = "Tx_context_desc";
 		break;
+	case IAVF_TX_DESC_DTYPE_IPSEC:
+		name = "Tx_IPsec_desc";
+		break;
 	default:
 		name = "unknown_desc";
 		break;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 53d1506677..353521d726 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1774,3 +1774,32 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 
 	return 0;
 }
+
+
+
+int
+iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
+	args.in_args = msg;
+	args.in_args_size = msg_len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 1);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+				"OP_INLINE_IPSEC_CRYPTO");
+		return err;
+	}
+
+	memcpy(resp_msg, args.out_buffer, resp_msg_len);
+
+	return 0;
+}
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 36a82e3faa..5eb230f687 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -5,7 +5,7 @@
 cflags += ['-Wno-strict-aliasing']
 
 includes += include_directories('../../common/iavf')
-deps += ['common_iavf']
+deps += ['common_iavf', 'security', 'cryptodev']
 
 sources = files(
         'iavf_ethdev.c',
@@ -15,6 +15,7 @@ sources = files(
         'iavf_fdir.c',
         'iavf_hash.c',
         'iavf_tm.c',
+        'iavf_ipsec_crypto.c',
 )
 
 if arch_subdir == 'x86'
diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
index 3a045040f1..7426eb9be3 100644
--- a/drivers/net/iavf/rte_pmd_iavf.h
+++ b/drivers/net/iavf/rte_pmd_iavf.h
@@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 /**
  * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map
index f3efe756cf..97f0f87311 100644
--- a/drivers/net/iavf/version.map
+++ b/drivers/net/iavf/version.map
@@ -13,4 +13,7 @@ EXPERIMENTAL {
 	rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+	# added in 21.11
+	rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v12 5/7] net/iavf: add xstats support for inline IPsec crypto
  2021-10-26 13:56 ` [dpdk-dev] [PATCH v12 " Radu Nicolau
                     ` (3 preceding siblings ...)
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-26 13:56   ` Radu Nicolau
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
                     ` (3 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 13:56 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add per-queue counters for maintaining inline IPsec crypto offload
statistics. The aggregate counters can be retrieved through
rte_security_session_stats_get(), and more detailed error counters
through the rte_ethdev xstats API.
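
For reference, a minimal sketch of how an application could read the new
counters through the rte_ethdev xstats API once the port is started; the
helper name and the error handling are illustrative only and not part of
this patch (the "inline_ipsec_crypto_" prefix matches the xstat names
added below):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_ethdev.h>

/* Print all inline IPsec crypto xstats exposed by the given port. */
static void
dump_inline_ipsec_xstats(uint16_t port_id)
{
	static const char pfx[] = "inline_ipsec_crypto_";
	struct rte_eth_xstat *xstats = NULL;
	struct rte_eth_xstat_name *names = NULL;
	int i, nb;

	/* First call with a NULL array returns the number of xstats. */
	nb = rte_eth_xstats_get(port_id, NULL, 0);
	if (nb <= 0)
		return;

	xstats = malloc(sizeof(*xstats) * nb);
	names = malloc(sizeof(*names) * nb);
	if (xstats == NULL || names == NULL)
		goto out;

	if (rte_eth_xstats_get(port_id, xstats, nb) != nb ||
	    rte_eth_xstats_get_names(port_id, names, nb) != nb)
		goto out;

	for (i = 0; i < nb; i++)
		if (strncmp(names[i].name, pfx, strlen(pfx)) == 0)
			printf("%s: %" PRIu64 "\n",
				names[i].name, xstats[i].value);
out:
	free(xstats);
	free(names);
}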

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h        | 21 ++++++++-
 drivers/net/iavf/iavf_ethdev.c | 84 ++++++++++++++++++++++++++++------
 drivers/net/iavf/iavf_rxtx.h   | 12 -----
 3 files changed, 89 insertions(+), 28 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 6df31a649e..f314373ab0 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -96,6 +96,25 @@ struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
 
+
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_eth_xstats {
+	struct virtchnl_eth_stats eth_stats;
+	struct iavf_ipsec_crypto_stats ips_stats;
+};
+
 /* Structure that defines a VSI, associated with a adapter. */
 struct iavf_vsi {
 	struct iavf_adapter *adapter; /* Backreference to associated adapter */
@@ -105,7 +124,7 @@ struct iavf_vsi {
 	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
 	uint16_t base_vector;
 	uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
-	struct virtchnl_eth_stats eth_stats_offset;
+	struct iavf_eth_xstats eth_stats_offset;
 };
 
 struct rte_flow;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index dba505494f..783a10060c 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -90,6 +90,7 @@ static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);
 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n);
 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -145,21 +146,37 @@ struct rte_iavf_xstats_name_off {
 	unsigned int offset;
 };
 
+#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
-	{"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
-	{"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
-	{"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
-	{"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
-	{"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
+	{"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
+	{"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
+	{"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
+	{"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
+	{"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
 	{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
 		rx_unknown_protocol)},
-	{"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
-	{"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
-	{"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
-	{"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
-	{"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
-	{"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
+	{"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
+	{"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
+	{"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
+	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
+	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
+	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+
+	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
+	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
+	{"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
+	{"inline_ipsec_crypto_ierrors_sad_lookup",
+			_OFF_OF(ips_stats.ierrors.sad_miss)},
+	{"inline_ipsec_crypto_ierrors_not_processed",
+			_OFF_OF(ips_stats.ierrors.not_processed)},
+	{"inline_ipsec_crypto_ierrors_icv_fail",
+			_OFF_OF(ips_stats.ierrors.icv_check)},
+	{"inline_ipsec_crypto_ierrors_length",
+			_OFF_OF(ips_stats.ierrors.ipsec_length)},
+	{"inline_ipsec_crypto_ierrors_misc",
+			_OFF_OF(ips_stats.ierrors.misc)},
 };
+#undef _OFF_OF
 
 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
 		sizeof(rte_iavf_stats_strings[0]))
@@ -177,7 +194,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.stats_reset                = iavf_dev_stats_reset,
 	.xstats_get                 = iavf_dev_xstats_get,
 	.xstats_get_names           = iavf_dev_xstats_get_names,
-	.xstats_reset               = iavf_dev_stats_reset,
+	.xstats_reset               = iavf_dev_xstats_reset,
 	.promiscuous_enable         = iavf_dev_promiscuous_enable,
 	.promiscuous_disable        = iavf_dev_promiscuous_disable,
 	.allmulticast_enable        = iavf_dev_allmulticast_enable,
@@ -1527,7 +1544,7 @@ iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
 static void
 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
 {
-	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
 
 	iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
 	iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
@@ -1589,7 +1606,18 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev)
 		return ret;
 
 	/* set stats offset base on current values */
-	vsi->eth_stats_offset = *pstats;
+	vsi->eth_stats_offset.eth_stats = *pstats;
+
+	return 0;
+}
+
+static int
+iavf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_dev_stats_reset(dev);
+	memset(&vf->vsi.eth_stats_offset, 0, sizeof(struct iavf_eth_xstats));
 
 	return 0;
 }
@@ -1609,6 +1637,27 @@ static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return IAVF_NB_XSTATS;
 }
 
+static void
+iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
+		struct iavf_ipsec_crypto_stats *ips)
+{
+	uint16_t idx;
+	for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
+		struct iavf_rx_queue *rxq;
+		struct iavf_ipsec_crypto_stats *stats;
+		rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
+		stats = &rxq->stats.ipsec_crypto;
+		ips->icount += stats->icount;
+		ips->ibytes += stats->ibytes;
+		ips->ierrors.count += stats->ierrors.count;
+		ips->ierrors.sad_miss += stats->ierrors.sad_miss;
+		ips->ierrors.not_processed += stats->ierrors.not_processed;
+		ips->ierrors.icv_check += stats->ierrors.icv_check;
+		ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
+		ips->ierrors.misc += stats->ierrors.misc;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1619,6 +1668,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_vsi *vsi = &vf->vsi;
 	struct virtchnl_eth_stats *pstats = NULL;
+	struct iavf_eth_xstats iavf_xtats = {0};
 
 	if (n < IAVF_NB_XSTATS)
 		return IAVF_NB_XSTATS;
@@ -1631,11 +1681,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 		return 0;
 
 	iavf_update_stats(vsi, pstats);
+	iavf_xtats.eth_stats = *pstats;
+
+	if (iavf_ipsec_crypto_supported(adapter))
+		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
-		xstats[i].value = *(uint64_t *)(((char *)pstats) +
+		xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
 			rte_iavf_stats_strings[i].offset);
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index b88c81f8f6..c7156d1daa 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -165,18 +165,6 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
-struct iavf_ipsec_crypto_stats {
-	uint64_t icount;
-	uint64_t ibytes;
-	struct {
-		uint64_t count;
-		uint64_t sad_miss;
-		uint64_t not_processed;
-		uint64_t icv_check;
-		uint64_t ipsec_length;
-		uint64_t misc;
-	} ierrors;
-};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v12 6/7] net/iavf: add watchdog for VFLR
  2021-10-26 13:56 ` [dpdk-dev] [PATCH v12 " Radu Nicolau
                     ` (4 preceding siblings ...)
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-10-26 13:56   ` Radu Nicolau
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
                     ` (2 subsequent siblings)
  8 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 13:56 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add a watchdog to the iAVF PMD which monitors the VFLR register. If the
device is not already in reset and a VF reset in progress is detected,
notify the user through a callback and move the device into the reset
state. If the device is already in reset, poll for completion of the
reset.

The watchdog is disabled by default; to enable it, set
IAVF_DEV_WATCHDOG_PERIOD to a non-zero value (in microseconds).
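
As an illustration only (the 500000 value is an assumption; any non-zero
period in microseconds enables the watchdog), a build that wants the VF
reset state polled twice per second would override the default in
drivers/net/iavf/iavf.h:

/* Illustrative override: poll the VF reset state every 500 ms. */
#define IAVF_DEV_WATCHDOG_PERIOD     500000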

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h        |  5 ++
 drivers/net/iavf/iavf_ethdev.c | 94 ++++++++++++++++++++++++++++++++++
 2 files changed, 99 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index f314373ab0..40c8045de1 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -31,6 +31,8 @@
 
 #define IAVF_NUM_MACADDR_MAX      64
 
+#define IAVF_DEV_WATCHDOG_PERIOD     0
+
 #define IAVF_DEFAULT_RX_PTHRESH      8
 #define IAVF_DEFAULT_RX_HTHRESH      8
 #define IAVF_DEFAULT_RX_WTHRESH      0
@@ -216,6 +218,9 @@ struct iavf_info {
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
+	/** iAVF watchdog enable */
+	bool watchdog_enabled;
+
 	/* Event from pf */
 	bool dev_closed;
 	bool link_up;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 783a10060c..ae0f8f17f4 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -25,6 +25,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_alarm.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
@@ -240,6 +241,91 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+__rte_unused
+static int
+iavf_vfr_inprogress(struct iavf_hw *hw)
+{
+	int inprogress = 0;
+
+	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
+		VIRTCHNL_VFR_INPROGRESS)
+		inprogress = 1;
+
+	if (inprogress)
+		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
+
+	return inprogress;
+}
+
+__rte_unused
+static void
+iavf_dev_watchdog(void *cb_arg)
+{
+	struct iavf_adapter *adapter = cb_arg;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	int vfr_inprogress = 0, rc = 0;
+
+	/* check if watchdog has been disabled since last call */
+	if (!adapter->vf.watchdog_enabled)
+		return;
+
+	/* If in reset then poll vfr_inprogress register for completion */
+	if (adapter->vf.vf_reset) {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (!vfr_inprogress) {
+			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+				adapter->vf.eth_dev->data->name);
+			adapter->vf.vf_reset = false;
+		}
+	/* If not in reset then poll vfr_inprogress register for VFLR event */
+	} else {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (vfr_inprogress) {
+			PMD_DRV_LOG(INFO,
+				"VF \"%s\" reset event detected by watchdog",
+				adapter->vf.eth_dev->data->name);
+
+			/* enter reset state with VFLR event */
+			adapter->vf.vf_reset = true;
+
+			rte_eth_dev_callback_process(adapter->vf.eth_dev,
+				RTE_ETH_EVENT_INTR_RESET, NULL);
+		}
+	}
+
+	/* re-alarm watchdog */
+	rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, cb_arg);
+
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed to reset device watchdog alarm for \"%s\"",
+			adapter->vf.eth_dev->data->name);
+}
+
+static void
+iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Enabling device watchdog");
+	adapter->vf.watchdog_enabled = true;
+	if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, (void *)adapter))
+		PMD_DRV_LOG(ERR, "Failed to enable device watchdog");
+#endif
+}
+
+static void
+iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Disabling device watchdog");
+	adapter->vf.watchdog_enabled = false;
+#endif
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
@@ -2466,6 +2552,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	iavf_default_rss_disable(adapter);
 
+
+	/* Start device watchdog */
+	iavf_dev_watchdog_enable(adapter);
+
+
 	return 0;
 
 flow_init_err:
@@ -2549,6 +2640,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
 		vf->vf_reset = false;
 
+	/* disable watchdog */
+	iavf_dev_watchdog_disable(adapter);
+
 	return ret;
 }
 
-- 
2.25.1
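
The reset event raised by the watchdog is delivered through the standard ethdev
callback mechanism, so an application only needs to register for
RTE_ETH_EVENT_INTR_RESET to react to a VFLR. A minimal sketch, with a
hypothetical handler name (not part of the patch):

    #include <stdio.h>
    #include <rte_common.h>
    #include <rte_ethdev.h>

    static int
    vf_reset_event_cb(uint16_t port_id, enum rte_eth_event_type event,
                      void *cb_arg, void *ret_param)
    {
            RTE_SET_USED(event);
            RTE_SET_USED(cb_arg);
            RTE_SET_USED(ret_param);
            /* stop traffic and schedule a port restart here */
            printf("VF reset detected on port %u\n", port_id);
            return 0;
    }

    /* during application init, after the port is probed */
    rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
                                  vf_reset_event_cb, NULL);
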


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v12 7/7] net/iavf: update doc with inline crypto support
  2021-10-26 13:56 ` [dpdk-dev] [PATCH v12 " Radu Nicolau
                     ` (5 preceding siblings ...)
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
@ 2021-10-26 13:56   ` Radu Nicolau
  2021-10-27  0:36   ` [dpdk-dev] [PATCH v12 0/7] iavf: add iAVF IPsec " Zhang, Qi Z
  2021-10-28 14:47   ` Ferruh Yigit
  8 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-26 13:56 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Haiyue Wang
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Update the PMD doc, feature matrix and release notes with the
new inline crypto feature.

Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 doc/guides/nics/features/iavf.ini      |  2 ++
 doc/guides/nics/intel_vf.rst           | 10 ++++++++++
 doc/guides/rel_notes/release_21_11.rst |  1 +
 3 files changed, 13 insertions(+)

diff --git a/doc/guides/nics/features/iavf.ini b/doc/guides/nics/features/iavf.ini
index dd3519e1e2..01f514239e 100644
--- a/doc/guides/nics/features/iavf.ini
+++ b/doc/guides/nics/features/iavf.ini
@@ -27,6 +27,7 @@ L4 checksum offload  = P
 Packet type parsing  = Y
 Rx descriptor status = Y
 Tx descriptor status = Y
+Inline crypto        = Y
 Basic stats          = Y
 Multiprocess aware   = Y
 FreeBSD              = Y
@@ -65,3 +66,4 @@ mark                 = Y
 passthru             = Y
 queue                = Y
 rss                  = Y
+security             = Y
diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index a1e236ad75..fd235e1463 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -633,3 +633,13 @@ Windows Support
 
 *   To load NetUIO driver, follow the steps mentioned in `dpdk-kmods repository
     <https://git.dpdk.org/dpdk-kmods/tree/windows/netuio/README.rst>`_.
+
+
+Inline IPsec Support
+--------------------
+
+*   IAVF PMD supports inline crypto processing depending on the underlying
+    hardware crypto capabilities. IPsec Security Gateway Sample Application
+    supports inline IPsec processing for IAVF PMD. For more details see the
+    IPsec Security Gateway Sample Application and Security library
+    documentation.
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 1ccac87b73..9c13ceed1c 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -163,6 +163,7 @@ New Features
   * Added Intel iavf support on Windows.
   * Added IPv4 and L4 (TCP/UDP/SCTP) checksum hash support in RSS flow.
   * Added PPPoL2TPv2oUDP RSS hash based on inner IP address and TCP/UDP port.
+  * Added Intel iavf inline crypto support.
 
 * **Updated Intel ice driver.**
 
-- 
2.25.1
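
For users of the sample application referenced above, inline crypto on an iAVF
port is selected per SA in the ipsec-secgw SA rules file. An illustrative rule
is shown below (key value elided; check the sample application guide for the
exact syntax and key/salt layout):

    sa out 5 aead_algo aes-128-gcm aead_key <key> mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 type inline-crypto-offload port_id 0
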


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v12 4/7] net/iavf: add iAVF IPsec inline crypto support
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-27  0:36     ` Zhang, Qi Z
  0 siblings, 0 replies; 128+ messages in thread
From: Zhang, Qi Z @ 2021-10-27  0:36 UTC (permalink / raw)
  To: Nicolau, Radu, Wu, Jingjing, Xing, Beilei, Ray Kinsella
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Richardson, Bruce, Ananyev,
	Konstantin



> -----Original Message-----
> From: Nicolau, Radu <radu.nicolau@intel.com>
> Sent: Tuesday, October 26, 2021 9:57 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>;
> Ray Kinsella <mdr@ashroe.eu>
> Cc: dev@dpdk.org; Doherty, Declan <declan.doherty@intel.com>; Sinha,
> Abhijit <abhijit.sinha@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>;
> Richardson, Bruce <bruce.richardson@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Nicolau, Radu <radu.nicolau@intel.com>
> Subject: [PATCH v12 4/7] net/iavf: add iAVF IPsec inline crypto support
> 
> Add support for inline crypto for IPsec, for ESP transport and
> tunnel over IPv4 and IPv6, as well as supporting the offload for
> ESP over UDP, and in conjunction with TSO for UDP and TCP flows.
> Implement support for rte_security packet metadata
> 
> Add definitions for the IPsec descriptors, and extend offload support
> in the data and context descriptors.
> 
> Add support to virtual channel mailbox for IPsec Crypto request
> operations. IPsec Crypto requests receive an initial acknowledgment
> from the physical function driver confirming receipt of the request,
> and then an asynchronous response with the success/failure of the
> request, including any response data.
> 
> Add enhanced descriptor debugging
> 
> Refactor of scalar tx burst function to support integration of offload
> 
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
> Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>
> ---
>  drivers/net/iavf/iavf.h                       |   10 +
>  drivers/net/iavf/iavf_ethdev.c                |   41 +-
>  drivers/net/iavf/iavf_generic_flow.c          |   15 +
>  drivers/net/iavf/iavf_generic_flow.h          |    2 +
>  drivers/net/iavf/iavf_ipsec_crypto.c          | 1894 +++++++++++++++++
>  drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
>  .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
>  drivers/net/iavf/iavf_rxtx.c                  |  202 +-
>  drivers/net/iavf/iavf_rxtx.h                  |  107 +-
>  drivers/net/iavf/iavf_vchnl.c                 |   29 +
>  drivers/net/iavf/meson.build                  |    3 +-
>  drivers/net/iavf/rte_pmd_iavf.h               |    1 +
>  drivers/net/iavf/version.map                  |    3 +
>  13 files changed, 2823 insertions(+), 27 deletions(-)
>  create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
>  create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
>  create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
> 
> diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
> index efc90f9072..6df31a649e 100644
> --- a/drivers/net/iavf/iavf.h
> +++ b/drivers/net/iavf/iavf.h
> @@ -221,6 +221,7 @@ struct iavf_info {
>  	rte_spinlock_t flow_ops_lock;
>  	struct iavf_parser_list rss_parser_list;
>  	struct iavf_parser_list dist_parser_list;
> +	struct iavf_parser_list ipsec_crypto_parser_list;
> 
>  	struct iavf_fdir_info fdir; /* flow director info */
>  	/* indicate large VF support enabled or not */
> @@ -245,6 +246,7 @@ enum iavf_proto_xtr_type {
>  	IAVF_PROTO_XTR_IPV6_FLOW,
>  	IAVF_PROTO_XTR_TCP,
>  	IAVF_PROTO_XTR_IP_OFFSET,
> +	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
>  	IAVF_PROTO_XTR_MAX,
>  };
> 
> @@ -256,11 +258,14 @@ struct iavf_devargs {
>  	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
>  };
> 
> +struct iavf_security_ctx;
> +
>  /* Structure to store private data for each VF instance. */
>  struct iavf_adapter {
>  	struct iavf_hw hw;
>  	struct rte_eth_dev_data *dev_data;
>  	struct iavf_info vf;
> +	struct iavf_security_ctx *security_ctx;
> 
>  	bool rx_bulk_alloc_allowed;
>  	/* For vector PMD */
> @@ -279,6 +284,8 @@ struct iavf_adapter {
>  	(&((struct iavf_adapter *)adapter)->vf)
>  #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
>  	(&((struct iavf_adapter *)adapter)->hw)
> +#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
> +	(((struct iavf_adapter *)adapter)->security_ctx)
> 
>  /* IAVF_VSI_TO */
>  #define IAVF_VSI_TO_HW(vsi) \
> @@ -421,5 +428,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
>  			uint16_t size);
>  void iavf_tm_conf_init(struct rte_eth_dev *dev);
>  void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
> +int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
> +		uint8_t *msg, size_t msg_len,
> +		uint8_t *resp_msg, size_t resp_msg_len);
>  extern const struct rte_tm_ops iavf_tm_ops;
>  #endif /* _IAVF_ETHDEV_H_ */
> diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
> index f892306f18..dba505494f 100644
> --- a/drivers/net/iavf/iavf_ethdev.c
> +++ b/drivers/net/iavf/iavf_ethdev.c
> @@ -30,6 +30,7 @@
>  #include "iavf_rxtx.h"
>  #include "iavf_generic_flow.h"
>  #include "rte_pmd_iavf.h"
> +#include "iavf_ipsec_crypto.h"
> 
>  /* devargs */
>  #define IAVF_PROTO_XTR_ARG         "proto_xtr"
> @@ -71,6 +72,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[]
> = {
>  	[IAVF_PROTO_XTR_IP_OFFSET] = {
>  		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
>  		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
> +	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
> +		.param = {
> +		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
> +		.ol_flag =
> +			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
>  };
> 
>  static int iavf_dev_configure(struct rte_eth_dev *dev);
> @@ -922,6 +928,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
>  	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
>  				  false);
> 
> +	/* free iAVF security device context all related resources */
> +	iavf_security_ctx_destroy(adapter);
> +
>  	adapter->stopped = 1;
>  	dev->data->dev_started = 0;
> 
> @@ -931,7 +940,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
>  static int
>  iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info
> *dev_info)
>  {
> -	struct iavf_info *vf =
> IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
> +	struct iavf_adapter *adapter =
> +		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> +	struct iavf_info *vf = &adapter->vf;
> 
>  	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
>  	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
> @@ -973,6 +984,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct
> rte_eth_dev_info *dev_info)
>  	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
>  		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
> 
> +	if (iavf_ipsec_crypto_supported(adapter)) {
> +		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
> +		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
> +	}
> +
>  	dev_info->default_rxconf = (struct rte_eth_rxconf) {
>  		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
>  		.rx_drop_en = 0,
> @@ -1718,6 +1734,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
>  		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
>  		{ "tcp",       IAVF_PROTO_XTR_TCP       },
>  		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
> +		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
>  	};
>  	uint32_t i;
> 
> @@ -1726,8 +1743,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
>  			return xtr_type_map[i].type;
>  	}
> 
> -	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
> -		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
> +	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
> +			"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
> 
>  	return -1;
>  }
> @@ -2375,6 +2392,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
>  		goto flow_init_err;
>  	}
> 
> +	/** Check if the IPsec Crypto offload is supported and create
> +	 *  security_ctx if it is.
> +	 */
> +	if (iavf_ipsec_crypto_supported(adapter)) {
> +		/* Initialize security_ctx only for primary process*/
> +		ret = iavf_security_ctx_create(adapter);
> +		if (ret) {
> +			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security
> instance");
> +			return ret;
> +		}
> +
> +		ret = iavf_security_init(adapter);
> +		if (ret) {
> +			PMD_INIT_LOG(ERR, "failed to initialized ipsec crypto
> resources");
> +			return ret;
> +		}
> +	}
> +
>  	iavf_default_rss_disable(adapter);
> 
>  	return 0;
> diff --git a/drivers/net/iavf/iavf_generic_flow.c
> b/drivers/net/iavf/iavf_generic_flow.c
> index 364904fa02..2befa125ac 100644
> --- a/drivers/net/iavf/iavf_generic_flow.c
> +++ b/drivers/net/iavf/iavf_generic_flow.c
> @@ -1766,6 +1766,7 @@ iavf_flow_init(struct iavf_adapter *ad)
>  	TAILQ_INIT(&vf->flow_list);
>  	TAILQ_INIT(&vf->rss_parser_list);
>  	TAILQ_INIT(&vf->dist_parser_list);
> +	TAILQ_INIT(&vf->ipsec_crypto_parser_list);
>  	rte_spinlock_init(&vf->flow_ops_lock);
> 
>  	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
> @@ -1840,6 +1841,9 @@ iavf_register_parser(struct iavf_flow_parser
> *parser,
>  	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
>  		list = &vf->dist_parser_list;
>  		TAILQ_INSERT_HEAD(list, parser_node, node);
> +	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
> +		list = &vf->ipsec_crypto_parser_list;
> +		TAILQ_INSERT_HEAD(list, parser_node, node);
>  	} else {
>  		return -EINVAL;
>  	}
> @@ -2149,6 +2153,13 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
> 
>  	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
>  				    actions, error);
> +	if (*engine)
> +		return 0;
> +
> +	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
> +			pattern, actions, error);
> +	if (*engine)
> +		return 0;
> 
>  	if (!*engine) {
>  		rte_flow_error_set(error, EINVAL,
> @@ -2195,6 +2206,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
>  		return flow;
>  	}
> 
> +	/* Special case for inline crypto egress flows */
> +	if (attr->egress && actions[0].type ==
> RTE_FLOW_ACTION_TYPE_SECURITY)
> +		goto free_flow;
> +
>  	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
>  			&engine, iavf_parse_engine_create, error);
>  	if (ret < 0) {
> diff --git a/drivers/net/iavf/iavf_generic_flow.h
> b/drivers/net/iavf/iavf_generic_flow.h
> index f2b54e1944..3681a96b31 100644
> --- a/drivers/net/iavf/iavf_generic_flow.h
> +++ b/drivers/net/iavf/iavf_generic_flow.h
> @@ -464,6 +464,7 @@ typedef int (*parse_pattern_action_t)(struct
> iavf_adapter *ad,
>  /* engine types. */
>  enum iavf_flow_engine_type {
>  	IAVF_FLOW_ENGINE_NONE = 0,
> +	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
>  	IAVF_FLOW_ENGINE_FDIR,
>  	IAVF_FLOW_ENGINE_HASH,
>  	IAVF_FLOW_ENGINE_MAX,
> @@ -477,6 +478,7 @@ enum iavf_flow_engine_type {
>   */
>  enum iavf_flow_classification_stage {
>  	IAVF_FLOW_STAGE_NONE = 0,
> +	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
>  	IAVF_FLOW_STAGE_RSS,
>  	IAVF_FLOW_STAGE_DISTRIBUTOR,
>  	IAVF_FLOW_STAGE_MAX,
> diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c
> b/drivers/net/iavf/iavf_ipsec_crypto.c
> new file mode 100644
> index 0000000000..633fedf860
> --- /dev/null
> +++ b/drivers/net/iavf/iavf_ipsec_crypto.c
> @@ -0,0 +1,1894 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2020 Intel Corporation
> + */
> +
> +#include <rte_cryptodev.h>
> +#include <rte_ethdev.h>
> +#include <rte_security_driver.h>
> +#include <rte_security.h>
> +
> +#include "iavf.h"
> +#include "iavf_rxtx.h"
> +#include "iavf_log.h"
> +#include "iavf_generic_flow.h"
> +
> +#include "iavf_ipsec_crypto.h"
> +#include "iavf_ipsec_crypto_capabilities.h"
> +
> +/**
> + * iAVF IPsec Crypto Security Context
> + */
> +struct iavf_security_ctx {
> +	struct iavf_adapter *adapter;
> +	int pkt_md_offset;
> +	struct rte_cryptodev_capabilities *crypto_capabilities;
> +};
> +
> +/**
> + * iAVF IPsec Crypto Security Session Parameters
> + */
> +struct iavf_security_session {
> +	struct iavf_adapter *adapter;
> +
> +	enum rte_security_ipsec_sa_mode mode;
> +	enum rte_security_ipsec_tunnel_type type;
> +	enum rte_security_ipsec_sa_direction direction;
> +
> +	struct {
> +		uint32_t spi; /* Security Parameter Index */
> +		uint32_t hw_idx; /* SA Index in hardware table */
> +	} sa;
> +
> +	struct {
> +		uint8_t enabled :1;
> +		union {
> +			uint64_t value;
> +			struct {
> +				uint32_t hi;
> +				uint32_t low;
> +			};
> +		};
> +	} esn;
> +
> +	struct {
> +		uint8_t enabled :1;
> +	} udp_encap;
> +
> +	size_t iv_sz;
> +	size_t icv_sz;
> +	size_t block_sz;
> +
> +	struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
> +};
> +/**
> + *  IV Length field in IPsec Tx Desc uses the following encoding:
> + *
> + *  0B - 0
> + *  4B - 1
> + *  8B - 2
> + *  16B - 3
> + *
> + * but we also need the IV Length for TSO to correctly calculate the total
> + * header length, so it is placed in the upper 6 bits here for easier retrieval.
> + */
> +static inline uint8_t
> +calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
> +{
> +	uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
> +
> +	switch (iv_sz) {
> +	case 4:
> +		iv_length = IAVF_IPSEC_IV_LEN_DW;
> +		break;
> +	case 8:
> +		iv_length = IAVF_IPSEC_IV_LEN_DDW;
> +		break;
> +	case 16:
> +		iv_length = IAVF_IPSEC_IV_LEN_QDW;
> +		break;
> +	}
> +
> +	return (iv_sz << 2) | iv_length;
> +}
> +
> +static unsigned int
> +iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
> +{
> +	return sizeof(struct iavf_security_session);
> +}
> +
> +static const struct rte_cryptodev_symmetric_capability *
> +get_capability(struct iavf_security_ctx *iavf_sctx,
> +	uint32_t algo, uint32_t type)
> +{
> +	const struct rte_cryptodev_capabilities *capability;
> +	int i = 0;
> +
> +	capability = &iavf_sctx->crypto_capabilities[i];
> +
> +	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
> +		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
> +			capability->sym.xform_type == type &&
> +			capability->sym.cipher.algo == algo)
> +			return &capability->sym;
> +		/** try next capability */
> +		capability = &iavf_crypto_capabilities[i++];
> +	}
> +
> +	return NULL;
> +}
> +
> +static const struct rte_cryptodev_symmetric_capability *
> +get_auth_capability(struct iavf_security_ctx *iavf_sctx,
> +	enum rte_crypto_auth_algorithm algo)
> +{
> +	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
> +}
> +
> +static const struct rte_cryptodev_symmetric_capability *
> +get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
> +	enum rte_crypto_cipher_algorithm algo)
> +{
> +	return get_capability(iavf_sctx, algo,
> RTE_CRYPTO_SYM_XFORM_CIPHER);
> +}
> +static const struct rte_cryptodev_symmetric_capability *
> +get_aead_capability(struct iavf_security_ctx *iavf_sctx,
> +	enum rte_crypto_aead_algorithm algo)
> +{
> +	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
> +}
> +
> +static uint16_t
> +get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
> +	enum rte_crypto_cipher_algorithm algo)
> +{
> +	const struct rte_cryptodev_symmetric_capability *capability;
> +
> +	capability = get_cipher_capability(iavf_sctx, algo);
> +	if (capability == NULL)
> +		return 0;
> +
> +	return capability->cipher.block_size;
> +}
> +
> +static uint16_t
> +get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
> +	enum rte_crypto_aead_algorithm algo)
> +{
> +	const struct rte_cryptodev_symmetric_capability *capability;
> +
> +	capability = get_aead_capability(iavf_sctx, algo);
> +	if (capability == NULL)
> +		return 0;
> +
> +	return capability->cipher.block_size;
> +}
> +
> +static uint16_t
> +get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
> +	enum rte_crypto_auth_algorithm algo)
> +{
> +	const struct rte_cryptodev_symmetric_capability *capability;
> +
> +	capability = get_auth_capability(iavf_sctx, algo);
> +	if (capability == NULL)
> +		return 0;
> +
> +	return capability->auth.block_size;
> +}
> +
> +static uint8_t
> +calc_context_desc_cipherblock_sz(size_t len)
> +{
> +	switch (len) {
> +	case 8:
> +		return 0x2;
> +	case 16:
> +		return 0x3;
> +	default:
> +		return 0x0;
> +	}
> +}
> +
> +static int
> +valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
> +{
> +	if (len < min || len > max)
> +		return false;
> +
> +	if (increment == 0)
> +		return true;
> +
> +	if ((len - min) % increment)
> +		return false;
> +
> +	/* make sure it fits in the key array */
> +	if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
> +		return false;
> +
> +	return true;
> +}
> +
> +static int
> +valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
> +	struct rte_crypto_auth_xform *auth)
> +{
> +	const struct rte_cryptodev_symmetric_capability *capability;
> +
> +	capability = get_auth_capability(iavf_sctx, auth->algo);
> +	if (capability == NULL)
> +		return false;
> +
> +	/* verify key size */
> +	if (!valid_length(auth->key.length,
> +		capability->auth.key_size.min,
> +		capability->auth.key_size.max,
> +		capability->aead.key_size.increment))
> +		return false;
> +
> +	return true;
> +}
> +
> +static int
> +valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
> +	struct rte_crypto_cipher_xform *cipher)
> +{
> +	const struct rte_cryptodev_symmetric_capability *capability;
> +
> +	capability = get_cipher_capability(iavf_sctx, cipher->algo);
> +	if (capability == NULL)
> +		return false;
> +
> +	/* verify key size */
> +	if (!valid_length(cipher->key.length,
> +		capability->cipher.key_size.min,
> +		capability->cipher.key_size.max,
> +		capability->cipher.key_size.increment))
> +		return false;
> +
> +	return true;
> +}
> +
> +static int
> +valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
> +	struct rte_crypto_aead_xform *aead)
> +{
> +	const struct rte_cryptodev_symmetric_capability *capability;
> +
> +	capability = get_aead_capability(iavf_sctx, aead->algo);
> +	if (capability == NULL)
> +		return false;
> +
> +	/* verify key size */
> +	if (!valid_length(aead->key.length,
> +		capability->aead.key_size.min,
> +		capability->aead.key_size.max,
> +		capability->aead.key_size.increment))
> +		return false;
> +
> +	return true;
> +}
> +
> +static int
> +iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
> +	struct rte_security_session_conf *conf)
> +{
> +	/** validate security action/protocol selection */
> +	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
> +		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
> +		PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
> +		return -EINVAL;
> +	}
> +
> +	/** validate IPsec protocol selection */
> +	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
> +		PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
> +		return -EINVAL;
> +	}
> +
> +	/** validate selected options */
> +	if (conf->ipsec.options.copy_dscp ||
> +		conf->ipsec.options.copy_flabel ||
> +		conf->ipsec.options.copy_df ||
> +		conf->ipsec.options.dec_ttl ||
> +		conf->ipsec.options.ecn ||
> +		conf->ipsec.options.stats) {
> +		PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
> +		return -EINVAL;
> +	}
> +
> +	/**
> +	 * Validate crypto xforms parameters.
> +	 *
> +	 * AEAD transforms can be used for either inbound/outbound IPsec SAs,
> +	 * for non-AEAD crypto transforms we explicitly only support
> CIPHER/AUTH
> +	 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
> +	 */
> +	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
> +		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
> +			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
> +			return -EINVAL;
> +		}
> +	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS
> &&
> +		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER
> &&
> +		conf->crypto_xform->next &&
> +		conf->crypto_xform->next->type ==
> RTE_CRYPTO_SYM_XFORM_AUTH) {
> +		if (!valid_cipher_xform(iavf_sctx,
> +				&conf->crypto_xform->cipher)) {
> +			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
> +			return -EINVAL;
> +		}
> +
> +		if (!valid_auth_xform(iavf_sctx,
> +				&conf->crypto_xform->next->auth)) {
> +			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
> +			return -EINVAL;
> +		}
> +	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS
> &&
> +		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
> +		conf->crypto_xform->next &&
> +		conf->crypto_xform->next->type ==
> RTE_CRYPTO_SYM_XFORM_CIPHER) {
> +		if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
> +			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
> +			return -EINVAL;
> +		}
> +
> +		if (!valid_cipher_xform(iavf_sctx,
> +				&conf->crypto_xform->next->cipher)) {
> +			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
> +			return -EINVAL;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +static void
> +sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
> +	struct rte_crypto_aead_xform *aead, uint32_t salt)
> +{
> +	cfg->crypto_type = VIRTCHNL_AEAD;
> +
> +	switch (aead->algo) {
> +	case RTE_CRYPTO_AEAD_AES_CCM:
> +		cfg->algo_type = VIRTCHNL_AES_CCM; break;
> +	case RTE_CRYPTO_AEAD_AES_GCM:
> +		cfg->algo_type = VIRTCHNL_AES_GCM; break;
> +	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
> +		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
> +	default:
> +		PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
> +		break;
> +	}
> +
> +	cfg->key_len = aead->key.length;
> +	cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt len */
> +	cfg->digest_len = aead->digest_length;
> +	cfg->salt = salt;
> +
> +	memcpy(cfg->key_data, aead->key.data, cfg->key_len);
> +}
> +
> +static void
> +sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
> +	struct rte_crypto_cipher_xform *cipher, uint32_t salt)
> +{
> +	cfg->crypto_type = VIRTCHNL_CIPHER;
> +
> +	switch (cipher->algo) {
> +	case RTE_CRYPTO_CIPHER_AES_CBC:
> +		cfg->algo_type = VIRTCHNL_AES_CBC; break;
> +	case RTE_CRYPTO_CIPHER_3DES_CBC:
> +		cfg->algo_type = VIRTCHNL_3DES_CBC; break;
> +	case RTE_CRYPTO_CIPHER_NULL:
> +		cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
> +	case RTE_CRYPTO_CIPHER_AES_CTR:
> +		cfg->algo_type = VIRTCHNL_AES_CTR;
> +		cfg->salt = salt;
> +		break;
> +	default:
> +		PMD_DRV_LOG(ERR, "Invalid cipher parameters");
> +		break;
> +	}
> +
> +	cfg->key_len = cipher->key.length;
> +	cfg->iv_len = cipher->iv.length;
> +	cfg->salt = salt;
> +
> +	memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
> +}
> +
> +static void
> +sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
> +	struct rte_crypto_auth_xform *auth, uint32_t salt)
> +{
> +	cfg->crypto_type = VIRTCHNL_AUTH;
> +
> +	switch (auth->algo) {
> +	case RTE_CRYPTO_AUTH_NULL:
> +		cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
> +	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
> +		cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
> +	case RTE_CRYPTO_AUTH_AES_CMAC:
> +		cfg->algo_type = VIRTCHNL_AES_CMAC; break;
> +	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
> +		cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
> +	case RTE_CRYPTO_AUTH_MD5_HMAC:
> +		cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
> +	case RTE_CRYPTO_AUTH_SHA1_HMAC:
> +		cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
> +	case RTE_CRYPTO_AUTH_SHA224_HMAC:
> +		cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
> +	case RTE_CRYPTO_AUTH_SHA256_HMAC:
> +		cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
> +	case RTE_CRYPTO_AUTH_SHA384_HMAC:
> +		cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
> +	case RTE_CRYPTO_AUTH_SHA512_HMAC:
> +		cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
> +	case RTE_CRYPTO_AUTH_AES_GMAC:
> +		cfg->algo_type = VIRTCHNL_AES_GMAC;
> +		cfg->salt = salt;
> +		break;
> +	default:
> +		PMD_DRV_LOG(ERR, "Invalid auth parameters");
> +		break;
> +	}
> +
> +	cfg->key_len = auth->key.length;
> +	/* special case for RTE_CRYPTO_AUTH_AES_GMAC */
> +	if (auth->algo == RTE_CRYPTO_AUTH_AES_GMAC)
> +		cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt */
> +	else
> +		cfg->iv_len = auth->iv.length;
> +	cfg->digest_len = auth->digest_length;
> +
> +	memcpy(cfg->key_data, auth->key.data, cfg->key_len);
> +}
> +
> +/**
> + * Send SA add virtual channel request to Inline IPsec driver.
> + *
> + * Inline IPsec driver expects SPI and destination IP address to be in host
> + * order, but DPDK APIs are network order, therefore we need to do a htonl
> + * conversion of these parameters.
> + */
> +static uint32_t
> +iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
> +	struct rte_security_session_conf *conf)
> +{
> +	struct inline_ipsec_msg *request = NULL, *response = NULL;
> +	struct virtchnl_ipsec_sa_cfg *sa_cfg;
> +	size_t request_len, response_len;
> +
> +	int rc;
> +
> +	request_len = sizeof(struct inline_ipsec_msg) +
> +			sizeof(struct virtchnl_ipsec_sa_cfg);
> +
> +	request = rte_malloc("iavf-sad-add-request", request_len, 0);
> +	if (request == NULL) {
> +		rc = -ENOMEM;
> +		goto update_cleanup;
> +	}
> +
> +	response_len = sizeof(struct inline_ipsec_msg) +
> +			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
> +	response = rte_malloc("iavf-sad-add-response", response_len, 0);
> +	if (response == NULL) {
> +		rc = -ENOMEM;
> +		goto update_cleanup;
> +	}
> +
> +	/* set msg header params */
> +	request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
> +	request->req_id = (uint16_t)0xDEADBEEF;
> +
> +	/* set SA configuration params */
> +	sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
> +
> +	sa_cfg->spi = conf->ipsec.spi;
> +	sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
> +	sa_cfg->virtchnl_direction =
> +		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
> +			VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
> +
> +	if (conf->ipsec.options.esn) {
> +		sa_cfg->esn_enabled = 1;
> +		sa_cfg->esn_hi = conf->ipsec.esn.hi;
> +		sa_cfg->esn_low = conf->ipsec.esn.low;
> +	}
> +
> +	if (conf->ipsec.options.udp_encap)
> +		sa_cfg->udp_encap_enabled = 1;
> +
> +	/* Set outer IP params */
> +	if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
> +		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
> +
> +		*((uint32_t *)sa_cfg->dst_addr)	=
> +			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
> +	} else {
> +		uint32_t *v6_dst_addr =
> +			conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
> +
> +		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
> +
> +		((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
> +		((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
> +		((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
> +		((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
> +	}
> +
> +	/* set crypto params */
> +	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
> +		sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
> +			&conf->crypto_xform->aead, conf->ipsec.salt);
> +
> +	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
> {
> +		sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
> +			&conf->crypto_xform->cipher, conf->ipsec.salt);
> +		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
> +			&conf->crypto_xform->next->auth, conf->ipsec.salt);
> +
> +	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
> +		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
> +			&conf->crypto_xform->auth, conf->ipsec.salt);
> +		if (conf->crypto_xform->auth.algo !=
> RTE_CRYPTO_AUTH_AES_GMAC)
> +			sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
> +			&conf->crypto_xform->next->cipher, conf->ipsec.salt);
> +	}
> +
> +	/* send virtual channel request to add SA to hardware database */
> +	rc = iavf_ipsec_crypto_request(adapter,
> +			(uint8_t *)request, request_len,
> +			(uint8_t *)response, response_len);
> +	if (rc)
> +		goto update_cleanup;
> +
> +	/* verify response id */
> +	if (response->ipsec_opcode != request->ipsec_opcode ||
> +		response->req_id != request->req_id)
> +		rc = -EFAULT;
> +	else
> +		rc = response->ipsec_data.sa_cfg_resp->sa_handle;
> +update_cleanup:
> +	rte_free(response);
> +	rte_free(request);
> +
> +	return rc;
> +}
> +
> +static void
> +set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata
> *template,
> +	struct iavf_security_session *sess)
> +{
> +	template->sa_idx = sess->sa.hw_idx;
> +
> +	if (sess->udp_encap.enabled)
> +		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
> +
> +	if (sess->esn.enabled)
> +		template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
> +
> +	template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
> +	template->ctx_desc_ipsec_params =
> +			calc_context_desc_cipherblock_sz(sess->block_sz) |
> +			((uint8_t)(sess->icv_sz >> 2) << 3);
> +}
> +
> +static void
> +set_session_parameter(struct iavf_security_ctx *iavf_sctx,
> +	struct iavf_security_session *sess,
> +	struct rte_security_session_conf *conf, uint32_t sa_idx)
> +{
> +	sess->adapter = iavf_sctx->adapter;
> +
> +	sess->mode = conf->ipsec.mode;
> +	sess->direction = conf->ipsec.direction;
> +
> +	if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
> +		sess->type = conf->ipsec.tunnel.type;
> +
> +	sess->sa.spi = conf->ipsec.spi;
> +	sess->sa.hw_idx = sa_idx;
> +
> +	if (conf->ipsec.options.esn) {
> +		sess->esn.enabled = 1;
> +		sess->esn.value = conf->ipsec.esn.value;
> +	}
> +
> +	if (conf->ipsec.options.udp_encap)
> +		sess->udp_encap.enabled = 1;
> +
> +	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
> +		sess->block_sz = get_aead_blocksize(iavf_sctx,
> +			conf->crypto_xform->aead.algo);
> +		sess->iv_sz = sizeof(uint64_t); /* iv.length includes salt */
> +		sess->icv_sz = conf->crypto_xform->aead.digest_length;
> +	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
> {
> +		sess->block_sz = get_cipher_blocksize(iavf_sctx,
> +			conf->crypto_xform->cipher.algo);
> +		sess->iv_sz = conf->crypto_xform->cipher.iv.length;
> +		sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
> +	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
> +		if (conf->crypto_xform->auth.algo ==
> RTE_CRYPTO_AUTH_AES_GMAC) {
> +			sess->block_sz = get_auth_blocksize(iavf_sctx,
> +				RTE_CRYPTO_SYM_XFORM_AUTH);

There is a warning due to an implicit conversion from 'enum rte_crypto_sym_xform_type' to 'enum rte_crypto_auth_algorithm'.
Replace the above line with (enum rte_crypto_auth_algorithm)RTE_CRYPTO_SYM_XFORM_AUTH); during merge.
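
In other words, the merged call would read as follows (sketch of the suggested
cast only; the surrounding lines are unchanged):

	sess->block_sz = get_auth_blocksize(iavf_sctx,
		(enum rte_crypto_auth_algorithm)RTE_CRYPTO_SYM_XFORM_AUTH);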


> +			sess->iv_sz = conf->crypto_xform->auth.iv.length;
> +			sess->icv_sz = conf->crypto_xform->auth.digest_length;
> +		} else {
> +			sess->block_sz = get_cipher_blocksize(iavf_sctx,
> +				conf->crypto_xform->next->cipher.algo);
> +			sess->iv_sz =
> +				conf->crypto_xform->next->cipher.iv.length;
> +			sess->icv_sz = conf->crypto_xform->auth.digest_length;
> +		}
> +	}
> +
> +	set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
> +}
> +
> +/**
> + * Create IPsec Security Association for inline IPsec Crypto offload.
> + *
> + * 1. validate session configuration parameters
> + * 2. allocate session memory from mempool
> + * 3. add SA to hardware database
> + * 4. set session parameters
> + * 5. create packet metadata template for datapath
> + */
> +static int
> +iavf_ipsec_crypto_session_create(void *device,
> +				 struct rte_security_session_conf *conf,
> +				 struct rte_security_session *session,
> +				 struct rte_mempool *mempool)
> +{
> +	struct rte_eth_dev *ethdev = device;
> +	struct iavf_adapter *adapter =
> +		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
> +	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
> +	struct iavf_security_session *iavf_session = NULL;
> +	int sa_idx;
> +	int ret = 0;
> +
> +	/* validate that all SA parameters are valid for device */
> +	ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
> +	if (ret)
> +		return ret;
> +
> +	/* allocate session context */
> +	if (rte_mempool_get(mempool, (void **)&iavf_session)) {
> +		PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
> +		return -ENOMEM;
> +	}
> +
> +	/* add SA to hardware database */
> +	sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
> +	if (sa_idx < 0) {
> +		PMD_DRV_LOG(ERR,
> +			"Failed to add SA (spi: %d, mode: %s, direction: %s)",
> +			conf->ipsec.spi,
> +			conf->ipsec.mode ==
> +				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
> +				"transport" : "tunnel",
> +			conf->ipsec.direction ==
> +				RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
> +				"inbound" : "outbound");
> +
> +		rte_mempool_put(mempool, iavf_session);
> +		return -EFAULT;
> +	}
> +
> +	/* save data plane required session parameters */
> +	set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
> +
> +	/* save to security session private data */
> +	set_sec_session_private_data(session, iavf_session);
> +
> +	return 0;
> +}
> +
> +/**
> + * Check if valid ipsec crypto action.
> + * SPI must be non-zero and SPI in session must match SPI value
> + * passed into function.
> + *
> + * returns: 0 if invalid session or SPI value equal zero
> + * returns: 1 if valid
> + */
> +uint32_t
> +iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
> +	const struct rte_security_session *session, uint32_t spi)
> +{
> +	struct iavf_adapter *adapter =
> +		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
> +	struct iavf_security_session *sess = session->sess_private_data;
> +
> +	/* verify we have a valid session and that it belongs to this adapter */
> +	if (unlikely(sess == NULL || sess->adapter != adapter))
> +		return false;
> +
> +	/* SPI value must be non-zero */
> +	if (spi == 0)
> +		return false;
> +	/* Session SPI must match flow SPI */
> +	else if (sess->sa.spi == spi) {
> +		return true;
> +		/**
> +		 * TODO: We should add a way of tracking valid hw SA indices to
> +		 * make validation less brittle
> +		 */
> +	}
> +
> +		return true;
> +}
> +
> +/**
> + * Send virtual channel security policy add request to IES driver.
> + *
> + * IES driver expects SPI and destination IP address to be in host
> + * order, but DPDK APIs are network order, therefore we need to do a htonl
> + * conversion of these parameters.
> + */
> +int
> +iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
> +	uint32_t esp_spi,
> +	uint8_t is_v4,
> +	rte_be32_t v4_dst_addr,
> +	uint8_t *v6_dst_addr,
> +	uint8_t drop)
> +{
> +	struct inline_ipsec_msg *request = NULL, *response = NULL;
> +	size_t request_len, response_len;
> +	int rc = 0;
> +
> +	request_len = sizeof(struct inline_ipsec_msg) +
> +			sizeof(struct virtchnl_ipsec_sp_cfg);
> +	request = rte_malloc("iavf-inbound-security-policy-add-request",
> +				request_len, 0);
> +	if (request == NULL) {
> +		rc = -ENOMEM;
> +		goto update_cleanup;
> +	}
> +
> +	/* set msg header params */
> +	request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
> +	request->req_id = (uint16_t)0xDEADBEEF;
> +
> +	/* ESP SPI */
> +	request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
> +
> +	/* Destination IP  */
> +	if (is_v4) {
> +		request->ipsec_data.sp_cfg->table_id =
> +				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
> +		request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
> +	} else {
> +		request->ipsec_data.sp_cfg->table_id =
> +				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
> +		request->ipsec_data.sp_cfg->dip[0] =
> +				htonl(((uint32_t *)v6_dst_addr)[0]);
> +		request->ipsec_data.sp_cfg->dip[1] =
> +				htonl(((uint32_t *)v6_dst_addr)[1]);
> +		request->ipsec_data.sp_cfg->dip[2] =
> +				htonl(((uint32_t *)v6_dst_addr)[2]);
> +		request->ipsec_data.sp_cfg->dip[3] =
> +				htonl(((uint32_t *)v6_dst_addr)[3]);
> +	}
> +
> +	request->ipsec_data.sp_cfg->drop = drop;
> +
> +	/** Traffic Class/Congestion Domain currently not supported */
> +	request->ipsec_data.sp_cfg->set_tc = 0;
> +	request->ipsec_data.sp_cfg->cgd = 0;
> +
> +	response_len = sizeof(struct inline_ipsec_msg) +
> +			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
> +	response = rte_malloc("iavf-inbound-security-policy-add-response",
> +				response_len, 0);
> +	if (response == NULL) {
> +		rc = -ENOMEM;
> +		goto update_cleanup;
> +	}
> +
> +	/* send virtual channel request to add SA to hardware database */
> +	rc = iavf_ipsec_crypto_request(adapter,
> +			(uint8_t *)request, request_len,
> +			(uint8_t *)response, response_len);
> +	if (rc)
> +		goto update_cleanup;
> +
> +	/* verify response */
> +	if (response->ipsec_opcode != request->ipsec_opcode ||
> +		response->req_id != request->req_id)
> +		rc = -EFAULT;
> +	else
> +		rc = response->ipsec_data.sp_cfg_resp->rule_id;
> +
> +update_cleanup:
> +	rte_free(request);
> +	rte_free(response);
> +
> +	return rc;
> +}
> +
> +static uint32_t
> +iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
> +	struct iavf_security_session *sess)
> +{
> +	struct inline_ipsec_msg *request = NULL, *response = NULL;
> +	size_t request_len, response_len;
> +	int rc = 0;
> +
> +	request_len = sizeof(struct inline_ipsec_msg) +
> +			sizeof(struct virtchnl_ipsec_sa_update);
> +	request = rte_malloc("iavf-sa-update-request", request_len, 0);
> +	if (request == NULL) {
> +		rc = -ENOMEM;
> +		goto update_cleanup;
> +	}
> +
> +	response_len = sizeof(struct inline_ipsec_msg) +
> +			sizeof(struct virtchnl_ipsec_resp);
> +	response = rte_malloc("iavf-sa-update-response", response_len, 0);
> +	if (response == NULL) {
> +		rc = -ENOMEM;
> +		goto update_cleanup;
> +	}
> +
> +	/* set msg header params */
> +	request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
> +	request->req_id = (uint16_t)0xDEADBEEF;
> +
> +	/* set request params */
> +	request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
> +	request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
> +
> +	/* send virtual channel request to add SA to hardware database */
> +	rc = iavf_ipsec_crypto_request(adapter,
> +			(uint8_t *)request, request_len,
> +			(uint8_t *)response, response_len);
> +	if (rc)
> +		goto update_cleanup;
> +
> +	/* verify response */
> +	if (response->ipsec_opcode != request->ipsec_opcode ||
> +		response->req_id != request->req_id)
> +		rc = -EFAULT;
> +	else
> +		rc = response->ipsec_data.ipsec_resp->resp;
> +
> +update_cleanup:
> +	rte_free(request);
> +	rte_free(response);
> +
> +	return rc;
> +}
> +
> +static int
> +iavf_ipsec_crypto_session_update(void *device,
> +		struct rte_security_session *session,
> +		struct rte_security_session_conf *conf)
> +{
> +	struct iavf_adapter *adapter = NULL;
> +	struct iavf_security_session *iavf_sess = NULL;
> +	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
> +	int rc = 0;
> +
> +	adapter =
> IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
> +	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
> +
> +	/* verify we have a valid session and that it belongs to this adapter */
> +	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
> +		return -EINVAL;
> +
> +	/* update esn hi 32-bits */
> +	if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
> +		/**
> +		 * Update ESN in hardware for inbound SA. Store in
> +		 * iavf_security_session for outbound SA for use
> +		 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
> +		 */
> +		if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
> +			rc = iavf_ipsec_crypto_sa_update_esn(adapter,
> +					iavf_sess);
> +		else
> +			iavf_sess->esn.hi = conf->ipsec.esn.hi;
> +	}
> +
> +	return rc;
> +}
> +
> +static int
> +iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
> +		struct rte_security_session *session __rte_unused,
> +		struct rte_security_stats *stats __rte_unused)
> +{
> +	return -EOPNOTSUPP;
> +}
> +
> +int
> +iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
> +	uint8_t is_v4, uint32_t flow_id)
> +{
> +	struct inline_ipsec_msg *request = NULL, *response = NULL;
> +	size_t request_len, response_len;
> +	int rc = 0;
> +
> +	request_len = sizeof(struct inline_ipsec_msg) +
> +			sizeof(struct virtchnl_ipsec_sp_destroy);
> +	request = rte_malloc("iavf-sp-del-request", request_len, 0);
> +	if (request == NULL) {
> +		rc = -ENOMEM;
> +		goto update_cleanup;
> +	}
> +
> +	response_len = sizeof(struct inline_ipsec_msg) +
> +			sizeof(struct virtchnl_ipsec_resp);
> +	response = rte_malloc("iavf-sp-del-response", response_len, 0);
> +	if (response == NULL) {
> +		rc = -ENOMEM;
> +		goto update_cleanup;
> +	}
> +
> +	/* set msg header params */
> +	request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
> +	request->req_id = (uint16_t)0xDEADBEEF;
> +
> +	/* set security policy params */
> +	request->ipsec_data.sp_destroy->table_id = is_v4 ?
> +			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
> +			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
> +	request->ipsec_data.sp_destroy->rule_id = flow_id;
> +
> +	/* send virtual channel request to add SA to hardware database */
> +	rc = iavf_ipsec_crypto_request(adapter,
> +			(uint8_t *)request, request_len,
> +			(uint8_t *)response, response_len);
> +	if (rc)
> +		goto update_cleanup;
> +
> +	/* verify response */
> +	if (response->ipsec_opcode != request->ipsec_opcode ||
> +		response->req_id != request->req_id)
> +		rc = -EFAULT;
> +	else
> +		return response->ipsec_data.ipsec_status->status;
> +
> +update_cleanup:
> +	rte_free(request);
> +	rte_free(response);
> +
> +	return rc;
> +}
> +
> +static uint32_t
> +iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
> +	struct iavf_security_session *sess)
> +{
> +	struct inline_ipsec_msg *request = NULL, *response = NULL;
> +	size_t request_len, response_len;
> +
> +	int rc = 0;
> +
> +	request_len = sizeof(struct inline_ipsec_msg) +
> +			sizeof(struct virtchnl_ipsec_sa_destroy);
> +
> +	request = rte_malloc("iavf-sa-del-request", request_len, 0);
> +	if (request == NULL) {
> +		rc = -ENOMEM;
> +		goto update_cleanup;
> +	}
> +
> +	response_len = sizeof(struct inline_ipsec_msg) +
> +			sizeof(struct virtchnl_ipsec_resp);
> +
> +	response = rte_malloc("iavf-sa-del-response", response_len, 0);
> +	if (response == NULL) {
> +		rc = -ENOMEM;
> +		goto update_cleanup;
> +	}
> +
> +	/* set msg header params */
> +	request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
> +	request->req_id = (uint16_t)0xDEADBEEF;
> +
> +	/**
> +	 * SA delete supports deletion of 1-8 specified SAs, or if the flag
> +	 * field is zero, all SAs associated with the VF will be deleted.
> +	 */
> +	if (sess) {
> +		request->ipsec_data.sa_destroy->flag = 0x1;
> +		request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
> +	} else {
> +		request->ipsec_data.sa_destroy->flag = 0x0;
> +	}
> +
> +	/* send virtual channel request to add SA to hardware database */
> +	rc = iavf_ipsec_crypto_request(adapter,
> +			(uint8_t *)request, request_len,
> +			(uint8_t *)response, response_len);
> +	if (rc)
> +		goto update_cleanup;
> +
> +	/* verify response */
> +	if (response->ipsec_opcode != request->ipsec_opcode ||
> +		response->req_id != request->req_id)
> +		rc = -EFAULT;
> +
> +	/**
> +	 * Delete status will be the same bitmask as the sa_destroy request
> +	 * flag if the deletes were successful
> +	 */
> +	if (request->ipsec_data.sa_destroy->flag !=
> +			response->ipsec_data.ipsec_status->status)
> +		rc = -EFAULT;
> +
> +update_cleanup:
> +	rte_free(response);
> +	rte_free(request);
> +
> +	return rc;
> +}
> +
> +static int
> +iavf_ipsec_crypto_session_destroy(void *device,
> +		struct rte_security_session *session)
> +{
> +	struct iavf_adapter *adapter = NULL;
> +	struct iavf_security_session *iavf_sess = NULL;
> +	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
> +	int ret;
> +
> +	adapter =
> IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
> +	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
> +
> +	/* verify we have a valid session and that it belongs to this adapter */
> +	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
> +		return -EINVAL;
> +
> +	ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
> +	rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
> +	return ret;
> +}
> +
> +/**
> + * Get ESP trailer from packet as well as calculate the total ESP trailer
> + * length, which include padding, ESP trailer footer and the ICV
> + */
> +static inline struct rte_esp_tail *
> +iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
> +	struct iavf_security_session *s, uint16_t *esp_trailer_length)
> +{
> +	struct rte_esp_tail *esp_trailer;
> +
> +	uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
> +	uint16_t offset = 0;
> +
> +	/**
> +	 * The ICV will not be present in TSO packets as this is appended by
> +	 * hardware during segment generation
> +	 */
> +	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
> RTE_MBUF_F_TX_UDP_SEG))
> +		length -=  s->icv_sz;
> +
> +	*esp_trailer_length = length;
> +
> +	/**
> +	 * Calculate offset in packet to ESP trailer header, this should be
> +	 * total packet length less the size of the ESP trailer plus the ICV
> +	 * length if it is present
> +	 */
> +	offset = rte_pktmbuf_pkt_len(m) - length;
> +
> +	if (m->nb_segs > 1) {
> +		/* find segment which esp trailer is located */
> +		while (m->data_len < offset) {
> +			offset -= m->data_len;
> +			m = m->next;
> +		}
> +	}
> +
> +	esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
> +
> +	*esp_trailer_length += esp_trailer->pad_len;
> +
> +	return esp_trailer;
> +}
> +
> +static inline uint16_t
> +iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
> +	struct iavf_security_session *s, uint16_t esp_tlen)
> +{
> +	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
> +	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
> +	uint16_t ol4_len = 0;		/* UDP NATT */
> +	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
> +	uint16_t l4_len = 0;		/* TCP/UDP/STCP hdrs */
> +	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
> +
> +	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
> +		ol3_len = m->outer_l3_len;
> +		/**<
> +		 * application provided l3len assumed to include length of
> +		 * ipv4/6 hdr + ext hdrs
> +		 */
> +
> +	if (s->udp_encap.enabled)
> +		ol4_len = sizeof(struct rte_udp_hdr);
> +
> +	l3_len = m->l3_len;
> +	l4_len = m->l4_len;
> +
> +	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
> +			esp_hlen + l3_len + l4_len + esp_tlen);
> +}
> +
> +static int
> +iavf_ipsec_crypto_pkt_metadata_set(void *device,
> +			 struct rte_security_session *session,
> +			 struct rte_mbuf *m, void *params)
> +{
> +	struct rte_eth_dev *ethdev = device;
> +	struct iavf_adapter *adapter =
> +			IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
> +	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
> +	struct iavf_security_session *iavf_sess = session->sess_private_data;
> +	struct iavf_ipsec_crypto_pkt_metadata *md;
> +	struct rte_esp_tail *esp_tail;
> +	uint64_t *sqn = params;
> +	uint16_t esp_trailer_length;
> +
> +	/* Check we have a valid session associated with this device */
> +	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
> +		return -EINVAL;
> +
> +	/* Get dynamic metadata location from mbuf */
> +	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
> +		struct iavf_ipsec_crypto_pkt_metadata *);
> +
> +	/* Set immutable metadata values from session template */
> +	memcpy(md, &iavf_sess->pkt_metadata_template,
> +		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
> +
> +	esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
> +			&esp_trailer_length);
> +
> +	/* Set per packet mutable metadata values */
> +	md->esp_trailer_len = esp_trailer_length;
> +	md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
> +				iavf_sess, esp_trailer_length);
> +	md->next_proto = esp_tail->next_proto;
> +
> +	/* If Extended SN in use set the upper 32-bits in metadata */
> +	if (iavf_sess->esn.enabled && sqn != NULL)
> +		md->esn = (uint32_t)(*sqn >> 32);
> +
> +	return 0;
> +}
> +
> +static int
> +iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
> +		struct virtchnl_ipsec_cap *capability)
> +{
> +	/* Perform pf-vf comms */
> +	struct inline_ipsec_msg *request = NULL, *response = NULL;
> +	size_t request_len, response_len;
> +	int rc;
> +
> +	request_len = sizeof(struct inline_ipsec_msg);
> +
> +	request = rte_malloc("iavf-device-capability-request", request_len, 0);
> +	if (request == NULL) {
> +		rc = -ENOMEM;
> +		goto update_cleanup;
> +	}
> +
> +	response_len = sizeof(struct inline_ipsec_msg) +
> +			sizeof(struct virtchnl_ipsec_cap);
> +	response = rte_malloc("iavf-device-capability-response",
> +			response_len, 0);
> +	if (response == NULL) {
> +		rc = -ENOMEM;
> +		goto update_cleanup;
> +	}
> +
> +	/* set msg header params */
> +	request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
> +	request->req_id = (uint16_t)0xDEADBEEF;
> +
> +	/* send virtual channel request to add SA to hardware database */
> +	rc = iavf_ipsec_crypto_request(adapter,
> +			(uint8_t *)request, request_len,
> +			(uint8_t *)response, response_len);
> +	if (rc)
> +		goto update_cleanup;
> +
> +	/* verify response id */
> +	if (response->ipsec_opcode != request->ipsec_opcode ||
> +		response->req_id != request->req_id){
> +		rc = -EFAULT;
> +		goto update_cleanup;
> +	}
> +	memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
> +
> +update_cleanup:
> +	rte_free(response);
> +	rte_free(request);
> +
> +	return rc;
> +}
> +
> +enum rte_crypto_auth_algorithm auth_maptbl[] = {
> +	/* Hash Algorithm */
> +	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
> +	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
> +	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
> +	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
> +	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
> +	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
> +	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
> +	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
> +	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
> +	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
> +	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
> +	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
> +	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
> +	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
> +	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
> +};
> +
> +static void
> +update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
> +		struct virtchnl_algo_cap *acap)
> +{
> +	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
> +
> +	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
> +
> +	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
> +
> +	capability->auth.algo = auth_maptbl[acap->algo_type];
> +	capability->auth.block_size = acap->block_size;
> +
> +	capability->auth.key_size.min = acap->min_key_size;
> +	capability->auth.key_size.max = acap->max_key_size;
> +	capability->auth.key_size.increment = acap->inc_key_size;
> +
> +	capability->auth.digest_size.min = acap->min_digest_size;
> +	capability->auth.digest_size.max = acap->max_digest_size;
> +	capability->auth.digest_size.increment = acap->inc_digest_size;
> +}
> +
> +enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
> +	/* Cipher Algorithm */
> +	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
> +	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
> +	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
> +	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
> +};
> +
> +static void
> +update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
> +	struct virtchnl_algo_cap *acap)
> +{
> +	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
> +
> +	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
> +
> +	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
> +
> +	capability->cipher.algo = cipher_maptbl[acap->algo_type];
> +
> +	capability->cipher.block_size = acap->block_size;
> +
> +	capability->cipher.key_size.min = acap->min_key_size;
> +	capability->cipher.key_size.max = acap->max_key_size;
> +	capability->cipher.key_size.increment = acap->inc_key_size;
> +
> +	capability->cipher.iv_size.min = acap->min_iv_size;
> +	capability->cipher.iv_size.max = acap->max_iv_size;
> +	capability->cipher.iv_size.increment = acap->inc_iv_size;
> +}
> +
> +enum rte_crypto_aead_algorithm aead_maptbl[] = {
> +	/* AEAD Algorithm */
> +	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
> +	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
> +	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
> +};
> +
> +static void
> +update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
> +	struct virtchnl_algo_cap *acap)
> +{
> +	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
> +
> +	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
> +
> +	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
> +
> +	capability->aead.algo = aead_maptbl[acap->algo_type];
> +
> +	capability->aead.block_size = acap->block_size;
> +
> +	capability->aead.key_size.min = acap->min_key_size;
> +	capability->aead.key_size.max = acap->max_key_size;
> +	capability->aead.key_size.increment = acap->inc_key_size;
> +
> +	capability->aead.aad_size.min = acap->min_aad_size;
> +	capability->aead.aad_size.max = acap->max_aad_size;
> +	capability->aead.aad_size.increment = acap->inc_aad_size;
> +
> +	capability->aead.iv_size.min = acap->min_iv_size;
> +	capability->aead.iv_size.max = acap->max_iv_size;
> +	capability->aead.iv_size.increment = acap->inc_iv_size;
> +
> +	capability->aead.digest_size.min = acap->min_digest_size;
> +	capability->aead.digest_size.max = acap->max_digest_size;
> +	capability->aead.digest_size.increment = acap->inc_digest_size;
> +}
> +
> +/**
> + * Dynamically set crypto capabilities based on virtchannel IPsec
> + * capabilities structure.
> + */
> +int
> +iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
> +		*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
> +{
> +	struct rte_cryptodev_capabilities *capabilities;
> +	int i, j, number_of_capabilities = 0, ci = 0;
> +
> +	/* Count the total number of crypto algorithms supported */
> +	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
> +		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
> +
> +	/**
> +	 * Allocate cryptodev capabilities structure for
> +	 * *number_of_capabilities* items plus one item to null terminate the
> +	 * array
> +	 */
> +	capabilities = rte_zmalloc("crypto_cap",
> +		sizeof(struct rte_cryptodev_capabilities) *
> +		(number_of_capabilities + 1), 0);
> +	if (capabilities == NULL)
> +		return -ENOMEM;
> +	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
> +
> +	/**
> +	 * Iterate over each virtchl crypto capability by crypto type and
> +	 * algorithm.
> +	 */
> +	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
> +		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
> +			switch (vch_cap->cap[i].crypto_type) {
> +			case VIRTCHNL_AUTH:
> +				update_auth_capabilities(&capabilities[ci],
> +					&vch_cap->cap[i].algo_cap_list[j]);
> +				break;
> +			case VIRTCHNL_CIPHER:
> +				update_cipher_capabilities(&capabilities[ci],
> +					&vch_cap->cap[i].algo_cap_list[j]);
> +				break;
> +			case VIRTCHNL_AEAD:
> +				update_aead_capabilities(&capabilities[ci],
> +					&vch_cap->cap[i].algo_cap_list[j]);
> +				break;
> +			default:
> +				capabilities[ci].op =
> +						RTE_CRYPTO_OP_TYPE_UNDEFINED;
> +				break;
> +			}
> +		}
> +	}
> +
> +	iavf_sctx->crypto_capabilities = capabilities;
> +	return 0;
> +}
> +
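The array built here follows the usual cryptodev convention of terminating with RTE_CRYPTO_OP_TYPE_UNDEFINED, so it can be walked as in the short sketch below (illustrative only; the loop and printf are not part of the driver):

	const struct rte_cryptodev_capabilities *cap;

	for (cap = iavf_sctx->crypto_capabilities;
			cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
		if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD)
			printf("AEAD algorithm %d supported\n",
					cap->sym.aead.algo);
	}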
> +/**
> + * Get security capabilities for device
> + */
> +static const struct rte_security_capability *
> +iavf_ipsec_crypto_capabilities_get(void *device)
> +{
> +	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
> +	struct iavf_adapter *adapter =
> +		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
> +	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
> +	unsigned int i;
> +
> +	static struct rte_security_capability iavf_security_capabilities[] = {
> +		{ /* IPsec Inline Crypto ESP Tunnel Egress */
> +			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +			.ipsec = {
> +				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
> +				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
> +				.options = { .udp_encap = 1,
> +						.stats = 1, .esn = 1 },
> +			},
> +			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
> +		},
> +		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
> +			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +			.ipsec = {
> +				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
> +				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
> +				.options = { .udp_encap = 1,
> +						.stats = 1, .esn = 1 },
> +			},
> +			.ol_flags = 0
> +		},
> +		{ /* IPsec Inline Crypto ESP Transport Egress */
> +			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +			.ipsec = {
> +				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
> +				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
> +				.options = { .udp_encap = 1, .stats = 1,
> +						.esn = 1 },
> +			},
> +			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
> +		},
> +		{ /* IPsec Inline Crypto ESP Transport Ingress */
> +			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +			.ipsec = {
> +				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
> +				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
> +				.options = { .udp_encap = 1, .stats = 1,
> +						.esn = 1 }
> +			},
> +			.ol_flags = 0
> +		},
> +		{
> +			.action = RTE_SECURITY_ACTION_TYPE_NONE
> +		}
> +	};
> +
> +	/**
> +	 * Update the security capabilities struct with the runtime discovered
> +	 * crypto capabilities, except for the last element of the array, which
> +	 * is the null termination
> +	 */
> +	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
> +			sizeof(iavf_security_capabilities[0])) - 1); i++) {
> +		iavf_security_capabilities[i].crypto_capabilities =
> +			iavf_sctx->crypto_capabilities;
> +	}
> +
> +	return iavf_security_capabilities;
> +}
> +
> +static struct rte_security_ops iavf_ipsec_crypto_ops = {
> +	.session_get_size		= iavf_ipsec_crypto_session_size_get,
> +	.session_create			= iavf_ipsec_crypto_session_create,
> +	.session_update			= iavf_ipsec_crypto_session_update,
> +	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
> +	.session_destroy		= iavf_ipsec_crypto_session_destroy,
> +	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
> +	.get_userdata			= NULL,
> +	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
> +};
> +
> +int
> +iavf_security_ctx_create(struct iavf_adapter *adapter)
> +{
> +	struct rte_security_ctx *sctx;
> +
> +	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
> +	if (sctx == NULL)
> +		return -ENOMEM;
> +
> +	sctx->device = adapter->vf.eth_dev;
> +	sctx->ops = &iavf_ipsec_crypto_ops;
> +	sctx->sess_cnt = 0;
> +
> +	adapter->vf.eth_dev->security_ctx = sctx;
> +
> +	if (adapter->security_ctx == NULL) {
> +		adapter->security_ctx = rte_malloc("iavf_security_ctx",
> +				sizeof(struct iavf_security_ctx), 0);
> +		if (adapter->security_ctx == NULL)
> +			return -ENOMEM;
> +	}
> +
> +	return 0;
> +}
> +
> +int
> +iavf_security_init(struct iavf_adapter *adapter)
> +{
> +	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
> +	struct rte_mbuf_dynfield pkt_md_dynfield = {
> +		.name = "iavf_ipsec_crypto_pkt_metadata",
> +		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
> +		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
> +	};
> +	struct virtchnl_ipsec_cap capabilities;
> +	int rc;
> +
> +	iavf_sctx->adapter = adapter;
> +
> +	iavf_sctx->pkt_md_offset =
> +		rte_mbuf_dynfield_register(&pkt_md_dynfield);
> +	if (iavf_sctx->pkt_md_offset < 0)
> +		return iavf_sctx->pkt_md_offset;
> +
> +	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
> +	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
> +	if (rc)
> +		return rc;
> +
> +	return	iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
> +			&capabilities);
> +}
> +
> +int
> +iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
> +{
> +	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
> +
> +	return iavf_sctx->pkt_md_offset;
> +}
> +
> +int
> +iavf_security_ctx_destroy(struct iavf_adapter *adapter)
> +{
> +	struct rte_security_ctx *sctx  = adapter->vf.eth_dev->security_ctx;
> +	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
> +
> +	if (iavf_sctx == NULL)
> +		return -ENODEV;
> +
> +	/* TODO: Add resources cleanup */
> +
> +	/* free and reset security data structures */
> +	rte_free(iavf_sctx);
> +	rte_free(sctx);
> +
> +	iavf_sctx = NULL;
> +	sctx = NULL;
> +
> +	return 0;
> +}
> +
> +int
> +iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
> +{
> +	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
> +
> +	/** Capability check for IPsec Crypto */
> +	if (resources && (resources->vf_cap_flags &
> +		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
> +		return true;
> +
> +	return false;
> +}
> +
> +#define IAVF_IPSEC_INSET_ESP (\
> +	IAVF_INSET_ESP_SPI)
> +
> +#define IAVF_IPSEC_INSET_AH (\
> +	IAVF_INSET_AH_SPI)
> +
> +#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
> +	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> +	IAVF_INSET_ESP_SPI)
> +
> +#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
> +	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> +	IAVF_INSET_ESP_SPI)
> +
> +enum iavf_ipsec_flow_pt_type {
> +	IAVF_PATTERN_ESP = 1,
> +	IAVF_PATTERN_AH,
> +	IAVF_PATTERN_UDP_ESP,
> +};
> +enum iavf_ipsec_flow_pt_ip_ver {
> +	IAVF_PATTERN_IPV4 = 1,
> +	IAVF_PATTERN_IPV6,
> +};
> +
> +#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
> +#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
> +#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
> +
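A quick worked example of the meta encoding above: IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6) evaluates to (void *)0x23, from which the parser recovers pattern type 3 (UDP_ESP) with IAVF_PATTERN_TYPE() and IP version 2 (IPv6) with IAVF_PATTERN_IP_V().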
> +static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
> +	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
> +			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
> +	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
> +			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
> +	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
> +			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
> +	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
> +			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
> +	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
> +			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
> +	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
> +			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
> +};
> +
> +struct iavf_ipsec_flow_item {
> +	uint64_t id;
> +	uint8_t is_ipv4;
> +	uint32_t spi;
> +	struct rte_ether_hdr eth_hdr;
> +	union {
> +		struct rte_ipv4_hdr ipv4_hdr;
> +		struct rte_ipv6_hdr ipv6_hdr;
> +	};
> +	struct rte_udp_hdr udp_hdr;
> +};
> +
> +static void
> +parse_eth_item(const struct rte_flow_item_eth *item,
> +		struct rte_ether_hdr *eth)
> +{
> +	memcpy(eth->src_addr.addr_bytes,
> +			item->src.addr_bytes, sizeof(eth->src_addr));
> +	memcpy(eth->dst_addr.addr_bytes,
> +			item->dst.addr_bytes, sizeof(eth->dst_addr));
> +}
> +
> +static void
> +parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
> +		struct rte_ipv4_hdr *ipv4)
> +{
> +	ipv4->src_addr = item->hdr.src_addr;
> +	ipv4->dst_addr = item->hdr.dst_addr;
> +}
> +
> +static void
> +parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
> +		struct rte_ipv6_hdr *ipv6)
> +{
> +	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
> +	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
> +}
> +
> +static void
> +parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
> +{
> +	udp->dst_port = item->hdr.dst_port;
> +	udp->src_port = item->hdr.src_port;
> +}
> +
> +static int
> +has_security_action(const struct rte_flow_action actions[],
> +	const void **session)
> +{
> +	/* only {SECURITY; END} supported */
> +	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
> +		actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
> +		*session = actions[0].conf;
> +		return true;
> +	}
> +	return false;
> +}
> +
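For reference, a hedged sketch of the kind of rule this parser accepts — an ETH / IPV4 / ESP pattern terminated by a single SECURITY action. port_id, attr, ipv4_spec, spi and sess are assumed to come from the application's own setup and are not defined in this patch:

	struct rte_flow_item_esp esp_spec = {
		.hdr = { .spi = rte_cpu_to_be_32(spi) },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_ESP, .spec = &esp_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		/* conf points at the inline crypto session created earlier */
		{ .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = sess },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow;

	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);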
> +static struct iavf_ipsec_flow_item *
> +iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
> +		const struct rte_flow_item pattern[],
> +		const struct rte_flow_action actions[],
> +		uint32_t type)
> +{
> +	const void *session;
> +	struct iavf_ipsec_flow_item
> +		*ipsec_flow = rte_malloc("security-flow-rule",
> +		sizeof(struct iavf_ipsec_flow_item), 0);
> +	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
> +	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
> +
> +	if (ipsec_flow == NULL)
> +		return NULL;
> +
> +	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
> +
> +	if (pattern[0].spec)
> +		parse_eth_item((const struct rte_flow_item_eth *)
> +				pattern[0].spec, &ipsec_flow->eth_hdr);
> +
> +	switch (p_type) {
> +	case IAVF_PATTERN_ESP:
> +		if (ipsec_flow->is_ipv4) {
> +			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
> +					pattern[1].spec,
> +					&ipsec_flow->ipv4_hdr);
> +		} else {
> +			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
> +					pattern[1].spec,
> +					&ipsec_flow->ipv6_hdr);
> +		}
> +		ipsec_flow->spi =
> +			((const struct rte_flow_item_esp *)
> +					pattern[2].spec)->hdr.spi;
> +		break;
> +	case IAVF_PATTERN_AH:
> +		if (ipsec_flow->is_ipv4) {
> +			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
> +					pattern[1].spec,
> +					&ipsec_flow->ipv4_hdr);
> +		} else {
> +			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
> +					pattern[1].spec,
> +					&ipsec_flow->ipv6_hdr);
> +		}
> +		ipsec_flow->spi =
> +			((const struct rte_flow_item_ah *)
> +					pattern[2].spec)->spi;
> +		break;
> +	case IAVF_PATTERN_UDP_ESP:
> +		if (ipsec_flow->is_ipv4) {
> +			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
> +					pattern[1].spec,
> +					&ipsec_flow->ipv4_hdr);
> +		} else {
> +			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
> +					pattern[1].spec,
> +					&ipsec_flow->ipv6_hdr);
> +		}
> +		parse_udp_item((const struct rte_flow_item_udp *)
> +				pattern[2].spec,
> +			&ipsec_flow->udp_hdr);
> +		ipsec_flow->spi =
> +			((const struct rte_flow_item_esp *)
> +					pattern[3].spec)->hdr.spi;
> +		break;
> +	default:
> +		goto flow_cleanup;
> +	}
> +
> +	if (!has_security_action(actions, &session))
> +		goto flow_cleanup;
> +
> +	if (!iavf_ipsec_crypto_action_valid(ethdev, session,
> +			ipsec_flow->spi))
> +		goto flow_cleanup;
> +
> +	return ipsec_flow;
> +
> +flow_cleanup:
> +	rte_free(ipsec_flow);
> +	return NULL;
> +}
> +
> +
> +static struct iavf_flow_parser iavf_ipsec_flow_parser;
> +
> +static int
> +iavf_ipsec_flow_init(struct iavf_adapter *ad)
> +{
> +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
> +	struct iavf_flow_parser *parser;
> +
> +	if (!vf->vf_res)
> +		return -EINVAL;
> +
> +	if (vf->vf_res->vf_cap_flags &
> +			VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
> +		parser = &iavf_ipsec_flow_parser;
> +	else
> +		return -ENOTSUP;
> +
> +	return iavf_register_parser(parser, ad);
> +}
> +
> +static void
> +iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
> +{
> +	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
> +}
> +
> +static int
> +iavf_ipsec_flow_create(struct iavf_adapter *ad,
> +		struct rte_flow *flow,
> +		void *meta,
> +		struct rte_flow_error *error)
> +{
> +	struct iavf_ipsec_flow_item *ipsec_flow = meta;
> +	if (!ipsec_flow) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				"NULL rule.");
> +		return -rte_errno;
> +	}
> +
> +	if (ipsec_flow->is_ipv4) {
> +		ipsec_flow->id =
> +			iavf_ipsec_crypto_inbound_security_policy_add(ad,
> +			ipsec_flow->spi,
> +			1,
> +			ipsec_flow->ipv4_hdr.dst_addr,
> +			NULL,
> +			0);
> +	} else {
> +		ipsec_flow->id =
> +			iavf_ipsec_crypto_inbound_security_policy_add(ad,
> +			ipsec_flow->spi,
> +			0,
> +			0,
> +			ipsec_flow->ipv6_hdr.dst_addr,
> +			0);
> +	}
> +
> +	if (ipsec_flow->id < 1) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
> +				"Failed to add SA.");
> +		return -rte_errno;
> +	}
> +
> +	flow->rule = ipsec_flow;
> +
> +	return 0;
> +}
> +
> +static int
> +iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
> +		struct rte_flow *flow,
> +		struct rte_flow_error *error)
> +{
> +	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
> +	if (!ipsec_flow) {
> +		rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				"NULL rule.");
> +		return -rte_errno;
> +	}
> +
> +	iavf_ipsec_crypto_security_policy_delete(ad,
> +			ipsec_flow->is_ipv4, ipsec_flow->id);
> +	rte_free(ipsec_flow);
> +	return 0;
> +}
> +
> +static struct iavf_flow_engine iavf_ipsec_flow_engine = {
> +	.init = iavf_ipsec_flow_init,
> +	.uninit = iavf_ipsec_flow_uninit,
> +	.create = iavf_ipsec_flow_create,
> +	.destroy = iavf_ipsec_flow_destroy,
> +	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
> +};
> +
> +static int
> +iavf_ipsec_flow_parse(struct iavf_adapter *ad,
> +		       struct iavf_pattern_match_item *array,
> +		       uint32_t array_len,
> +		       const struct rte_flow_item pattern[],
> +		       const struct rte_flow_action actions[],
> +		       void **meta,
> +		       struct rte_flow_error *error)
> +{
> +	struct iavf_pattern_match_item *item = NULL;
> +	int ret = -1;
> +
> +	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
> +	if (item && item->meta) {
> +		uint32_t type = (uint64_t)(item->meta);
> +		struct iavf_ipsec_flow_item *fi =
> +				iavf_ipsec_flow_item_parse(ad->vf.eth_dev,
> +						pattern, actions, type);
> +		if (fi && meta) {
> +			*meta = fi;
> +			ret = 0;
> +		}
> +	}
> +	return ret;
> +}
> +
> +static struct iavf_flow_parser iavf_ipsec_flow_parser = {
> +	.engine = &iavf_ipsec_flow_engine,
> +	.array = iavf_ipsec_flow_pattern,
> +	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
> +	.parse_pattern_action = iavf_ipsec_flow_parse,
> +	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
> +};
> +
> +RTE_INIT(iavf_ipsec_flow_engine_register)
> +{
> +	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
> +}
> diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
> new file mode 100644
> index 0000000000..4e4c8798ec
> --- /dev/null
> +++ b/drivers/net/iavf/iavf_ipsec_crypto.h
> @@ -0,0 +1,160 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2020 Intel Corporation
> + */
> +
> +#ifndef _IAVF_IPSEC_CRYPTO_H_
> +#define _IAVF_IPSEC_CRYPTO_H_
> +
> +#include <rte_security.h>
> +
> +#include "iavf.h"
> +
> +
> +
> +struct iavf_tx_ipsec_desc {
> +	union {
> +		struct {
> +			__le64 qw0;
> +			__le64 qw1;
> +		};
> +		struct {
> +			__le16 l4payload_length;
> +			__le32 esn;
> +			__le16 trailer_length;
> +			u8 type:4;
> +			u8 rsv:1;
> +			u8 udp:1;
> +			u8 ivlen:2;
> +			u8 next_header;
> +			__le16 ipv6_ext_hdr_length;
> +			__le32 said;
> +		} __rte_packed;
> +	};
> +} __rte_packed;
> +
> +#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
> +#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
> +			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
> +
> +#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
> +#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
> +			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
> +
> +#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
> +#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
> +			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
> +
> +#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
> +#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
> +			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
> +
> +#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
> +#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
> +			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
> +
> +#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
> +#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
> +			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
> +
> +#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
> +#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
> +			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
> +
> +#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
> +#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
> +			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
> +
> +/* Initialization Vector Length type */
> +enum iavf_ipsec_iv_len {
> +	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
> +	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
> +	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
> +	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
> +};
> +
> +
> +/* IPsec Crypto Packet Metadata offload flags */
> +#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
> +#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
> +#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
> +#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
> +
> +/**
> + * Packet metadata structure used to hold the parameters required by the iAVF
> + * transmit data path. Values are set per packet for a session by calling the
> + * rte_security_set_pkt_metadata() API.
> + */
> +struct iavf_ipsec_crypto_pkt_metadata {
> +	uint32_t sa_idx;                /* SA hardware index (20b/4B) */
> +
> +	uint8_t ol_flags;		/* flags (1B) */
> +	uint8_t len_iv;			/* IV length (2b/1B) */
> +	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
> +	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
> +
> +	uint16_t l4_payload_len;	/* L4 payload length */
> +	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extension headers len (5b/1B) */
> +	uint8_t next_proto;		/* Next Protocol (8b/1B) */
> +
> +	uint32_t esn;		        /* Extended Sequence Number (32b/4B) */
> +} __rte_packed;
> +
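Given the __rte_packed attribute, the structure above is expected to occupy exactly 16 bytes (4 + 4 + 4 + 4). If desired, that assumption could be pinned with a build-time check placed in an init path such as iavf_security_init():

	RTE_BUILD_BUG_ON(sizeof(struct iavf_ipsec_crypto_pkt_metadata) != 16);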
> +/**
> + * Inline IPsec Crypto offload is supported
> + */
> +int
> +iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
> +
> +/**
> + * Create security context
> + */
> +int iavf_security_ctx_create(struct iavf_adapter *adapter);
> +
> +/**
> + * Initialize security context
> + */
> +int iavf_security_init(struct iavf_adapter *adapter);
> +
> +/**
> + * Set security capabilities
> + */
> +int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
> +		*iavf_sctx, struct virtchnl_ipsec_cap *virtchl_capabilities);
> +
> +
> +int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
> +
> +/**
> + * Destroy security context
> + */
> +int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
> +
> +/**
> + * Verify that the inline IPsec Crypto action is valid for this device
> + */
> +uint32_t
> +iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
> +	const struct rte_security_session *session, uint32_t spi);
> +
> +/**
> + * Add inbound security policy rule to hardware
> + */
> +int
> +iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
> +	uint32_t esp_spi,
> +	uint8_t is_v4,
> +	rte_be32_t v4_dst_addr,
> +	uint8_t *v6_dst_addr,
> +	uint8_t drop);
> +
> +/**
> + * Delete inbound security policy rule from hardware
> + */
> +int
> +iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
> +	uint8_t is_v4, uint32_t flow_id);
> +
> +#endif /* _IAVF_IPSEC_CRYPTO_H_ */
> diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
> new file mode 100644
> index 0000000000..70ce8dd638
> --- /dev/null
> +++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
> @@ -0,0 +1,383 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2020 Intel Corporation
> + */
> +
> +#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
> +#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
> +
> +static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
> +	{	/* SHA1 HMAC */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
> +				.block_size = 64,
> +				.key_size = {
> +					.min = 1,
> +					.max = 64,
> +					.increment = 1
> +				},
> +				.digest_size = {
> +					.min = 20,
> +					.max = 20,
> +					.increment = 0
> +				},
> +				.iv_size = { 0 }
> +			}, }
> +		}, }
> +	},
> +	{	/* SHA256 HMAC */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
> +				.block_size = 64,
> +				.key_size = {
> +					.min = 1,
> +					.max = 64,
> +					.increment = 1
> +				},
> +				.digest_size = {
> +					.min = 32,
> +					.max = 32,
> +					.increment = 0
> +				},
> +				.iv_size = { 0 }
> +			}, }
> +		}, }
> +	},
> +	{	/* SHA384 HMAC */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
> +				.block_size = 128,
> +				.key_size = {
> +					.min = 1,
> +					.max = 128,
> +					.increment = 1
> +				},
> +				.digest_size = {
> +					.min = 48,
> +					.max = 48,
> +					.increment = 0
> +				},
> +				.iv_size = { 0 }
> +			}, }
> +		}, }
> +	},
> +	{	/* SHA512 HMAC */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
> +				.block_size = 128,
> +				.key_size = {
> +					.min = 1,
> +					.max = 128,
> +					.increment = 1
> +				},
> +				.digest_size = {
> +					.min = 64,
> +					.max = 64,
> +					.increment = 0
> +				},
> +				.iv_size = { 0 }
> +			}, }
> +		}, }
> +	},
> +	{	/* MD5 HMAC */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
> +				.block_size = 64,
> +				.key_size = {
> +					.min = 1,
> +					.max = 64,
> +					.increment = 1
> +				},
> +				.digest_size = {
> +					.min = 16,
> +					.max = 16,
> +					.increment = 0
> +				},
> +				.iv_size = { 0 }
> +			}, }
> +		}, }
> +	},
> +	{	/* AES XCBC MAC */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 16,
> +					.max = 16,
> +					.increment = 0
> +				},
> +				.digest_size = {
> +					.min = 16,
> +					.max = 16,
> +					.increment = 0
> +				},
> +				.aad_size = { 0 },
> +				.iv_size = { 0 }
> +			}, }
> +		}, }
> +	},
> +	{	/* AES GCM */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
> +			{.aead = {
> +				.algo = RTE_CRYPTO_AEAD_AES_GCM,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 16,
> +					.max = 32,
> +					.increment = 8
> +				},
> +				.digest_size = {
> +					.min = 8,
> +					.max = 16,
> +					.increment = 4
> +				},
> +				.aad_size = {
> +					.min = 0,
> +					.max = 240,
> +					.increment = 1
> +				},
> +				.iv_size = {
> +					.min = 8,
> +					.max = 8,
> +					.increment = 0
> +				},
> +			}, }
> +		}, }
> +	},
> +	{	/* ChaCha20-Poly1305 */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
> +			{.aead = {
> +				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 32,
> +					.max = 32,
> +					.increment = 0
> +				},
> +				.digest_size = {
> +					.min = 8,
> +					.max = 16,
> +					.increment = 4
> +				},
> +				.aad_size = {
> +					.min = 0,
> +					.max = 240,
> +					.increment = 1
> +				},
> +				.iv_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0
> +				},
> +			}, }
> +		}, }
> +	},
> +	{	/* AES CCM */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
> +			{.aead = {
> +				.algo = RTE_CRYPTO_AEAD_AES_CCM,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 16,
> +					.max = 32,
> +					.increment = 8
> +				},
> +				.digest_size = {
> +					.min = 8,
> +					.max = 16,
> +					.increment = 4
> +				},
> +				.aad_size = {
> +					.min = 0,
> +					.max = 240,
> +					.increment = 1
> +				},
> +				.iv_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0
> +				},
> +			}, }
> +		}, }
> +	},
> +	{	/* AES GMAC (AUTH) */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 16,
> +					.max = 32,
> +					.increment = 8
> +				},
> +				.digest_size = {
> +					.min = 8,
> +					.max = 16,
> +					.increment = 4
> +				},
> +				.iv_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0
> +				}
> +			}, }
> +		}, }
> +	},
> +	{	/* AES CMAC (AUTH) */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 16,
> +					.max = 32,
> +					.increment = 8
> +				},
> +				.digest_size = {
> +					.min = 8,
> +					.max = 16,
> +					.increment = 4
> +				},
> +				.iv_size = {
> +					.min = 12,
> +					.max = 12,
> +					.increment = 0
> +				}
> +			}, }
> +		}, }
> +	},
> +	{	/* AES CBC */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
> +			{.cipher = {
> +				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 16,
> +					.max = 32,
> +					.increment = 8
> +				},
> +				.iv_size = {
> +					.min = 16,
> +					.max = 16,
> +					.increment = 0
> +				}
> +			}, }
> +		}, }
> +	},
> +	{	/* AES CTR */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
> +			{.cipher = {
> +				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
> +				.block_size = 16,
> +				.key_size = {
> +					.min = 16,
> +					.max = 32,
> +					.increment = 8
> +				},
> +				.iv_size = {
> +					.min = 8,
> +					.max = 8,
> +					.increment = 0
> +				}
> +			}, }
> +		}, }
> +	},
> +	{	/* NULL (AUTH) */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +			{.auth = {
> +				.algo = RTE_CRYPTO_AUTH_NULL,
> +				.block_size = 1,
> +				.key_size = {
> +					.min = 0,
> +					.max = 0,
> +					.increment = 0
> +				},
> +				.digest_size = {
> +					.min = 0,
> +					.max = 0,
> +					.increment = 0
> +				},
> +				.iv_size = { 0 }
> +			}, },
> +		}, },
> +	},
> +	{	/* NULL (CIPHER) */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
> +			{.cipher = {
> +				.algo = RTE_CRYPTO_CIPHER_NULL,
> +				.block_size = 1,
> +				.key_size = {
> +					.min = 0,
> +					.max = 0,
> +					.increment = 0
> +				},
> +				.iv_size = {
> +					.min = 0,
> +					.max = 0,
> +					.increment = 0
> +				}
> +			}, },
> +		}, }
> +	},
> +	{	/* 3DES CBC */
> +		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +		{.sym = {
> +			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
> +			{.cipher = {
> +				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
> +				.block_size = 8,
> +				.key_size = {
> +					.min = 24,
> +					.max = 24,
> +					.increment = 0
> +				},
> +				.iv_size = {
> +					.min = 8,
> +					.max = 8,
> +					.increment = 0
> +				}
> +			}, }
> +		}, }
> +	},
> +	{
> +		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
> +	}
> +};
> +
> +
> +#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
> diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
> index 128691aaf1..80438f9f8a 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -27,6 +27,7 @@
> 
>  #include "iavf.h"
>  #include "iavf_rxtx.h"
> +#include "iavf_ipsec_crypto.h"
>  #include "rte_pmd_iavf.h"
> 
>  /* Offset of mbuf dynamic field for protocol extraction's metadata */
> @@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
>  uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
>  uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
>  uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
> +uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
> 
>  uint8_t
>  iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
> @@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
>  		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
>  		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
>  		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
> +		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
> +				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
>  	};
> 
>  	return flex_type < RTE_DIM(rxdid_map) ?
> @@ -508,6 +512,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
>  		rxq->rxd_to_pkt_fields =
>  			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
>  		break;
> +	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
> +		rxq->xtr_ol_flag =
> +			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
> +		rxq->rxd_to_pkt_fields =
> +			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
> +		break;
>  	case IAVF_RXDID_COMMS_OVS_1:
>  		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
>  		break;
> @@ -692,6 +702,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
>  		       const struct rte_eth_txconf *tx_conf)
>  {
>  	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +	struct iavf_adapter *adapter =
> +		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
>  	struct iavf_info *vf =
>  		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
>  	struct iavf_tx_queue *txq;
> @@ -736,9 +748,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
>  		return -ENOMEM;
>  	}
> 
> -	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
> +	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
>  		struct virtchnl_vlan_supported_caps *insertion_support =
> -			&vf->vlan_v2_caps.offloads.insertion_support;
> +			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
>  		uint32_t insertion_cap;
> 
>  		if (insertion_support->outer)
> @@ -762,6 +774,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
>  	txq->offloads = offloads;
>  	txq->tx_deferred_start = tx_conf->tx_deferred_start;
> 
> +	if (iavf_ipsec_crypto_supported(adapter))
> +		txq->ipsec_crypto_pkt_md_offset =
> +			iavf_security_get_pkt_md_offset(adapter);
> +
>  	/* Allocate software ring */
>  	txq->sw_ring =
>  		rte_zmalloc_socket("iavf tx sw ring",
> @@ -1081,6 +1097,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
>  #endif
>  }
> 
> +static inline void
> +iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
> +			  volatile union iavf_rx_flex_desc *rxdp)
> +{
> +	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
> +		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
> +
> +	mb->dynfield1[0] = desc->ipsec_said &
> +			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
> +}
> +
> +static inline void
> +iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
> +			  volatile union iavf_rx_flex_desc *rxdp,
> +			  struct iavf_ipsec_crypto_stats *stats)
> +{
> +	uint16_t status1 = rte_le_to_cpu_16(rxdp->wb.status_error1);
> +
> +	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
> +		uint16_t ipsec_status;
> +
> +		mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
> +
> +		ipsec_status = status1 &
> +			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
> +
> +
> +		if (unlikely(ipsec_status !=
> +			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
> +			mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
> +
> +			switch (ipsec_status) {
> +			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
> +				stats->ierrors.sad_miss++;
> +				break;
> +			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
> +				stats->ierrors.not_processed++;
> +				break;
> +			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
> +				stats->ierrors.icv_check++;
> +				break;
> +			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
> +				stats->ierrors.ipsec_length++;
> +				break;
> +			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
> +				stats->ierrors.misc++;
> +				break;
> +			}
> +
> +			stats->ierrors.count++;
> +			return;
> +		}
> +
> +		stats->icount++;
> +		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
> +
> +		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
> +			ipsec_status !=
> +				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
> +			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
> +	}
> +}
> +
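On the receive side the outcome of the inline processing above is visible to the application only through the standard mbuf flags; a minimal sketch (process_decrypted() is a placeholder, not a DPDK API):

	if (m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
		if (m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)
			rte_pktmbuf_free(m);	/* SAD miss, bad ICV, etc. */
		else
			process_decrypted(m);	/* inner packet already in clear */
	}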
> +
>  /* Translate the rx descriptor status and error fields to pkt flags */
>  static inline uint64_t
>  iavf_rxd_to_pkt_flags(uint64_t qword)
> @@ -1399,6 +1479,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
>  		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
>  			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
>  		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
> +		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
> +				&rxq->stats.ipsec_crypto);
>  		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
>  		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
>  		rxm->ol_flags |= pkt_flags;
> @@ -1541,6 +1623,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
>  		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
>  			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
>  		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
> +		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
> +				&rxq->stats.ipsec_crypto);
>  		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
>  		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
> 
> @@ -1779,6 +1863,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
>  			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
>  				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
>  			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
> +			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
> +				&rxq->stats.ipsec_crypto);
>  			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
>  			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
>  			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
> @@ -2091,6 +2177,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
>  	*field |= cmd;
>  }
> 
> +static inline void
> +iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
> +	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
> +{
> +	uint64_t ipsec_field =
> +		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
> +			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
> +
> +	*field |= ipsec_field;
> +}
> +
> +
>  static inline void
>  iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
>  		const struct rte_mbuf *m)
> @@ -2123,15 +2221,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
> 
>  static inline uint16_t
>  iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
> -	struct rte_mbuf *m)
> +	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
>  {
>  	uint64_t segmentation_field = 0;
>  	uint64_t total_length = 0;
> 
> -	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
> +	if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
> +		total_length = ipsec_md->l4_payload_len;
> +	} else {
> +		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
> 
> -	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
> -		total_length -= m->outer_l3_len;
> +		if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
> +			total_length -= m->outer_l3_len;
> +	}
> 
>  #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
>  	if (!m->l4_len || !m->tso_segsz)
> @@ -2160,7 +2262,8 @@ struct iavf_tx_context_desc_qws {
> 
>  static inline void
>  iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
> -	struct rte_mbuf *m, uint16_t *tlen)
> +	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
> +	uint16_t *tlen)
>  {
>  	volatile struct iavf_tx_context_desc_qws *desc_qws =
>  			(volatile struct iavf_tx_context_desc_qws *)desc;
> @@ -2172,8 +2275,13 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
> 
>  	/* fill segmentation field */
>  	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
> +		/* fill IPsec field */
> +		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
> +			iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
> +				ipsec_md);
> +
>  		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
> -				m);
> +				m, ipsec_md);
>  	}
> 
>  	/* fill tunnelling field */
> @@ -2187,6 +2295,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
>  }
> 
> 
> +static inline void
> +iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
> +	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
> +{
> +	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
> +		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
> +		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
> +		((uint64_t)md->esp_trailer_len <<
> +				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
> +
> +	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
> +		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
> +		((uint64_t)md->next_proto <<
> +				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
> +		((uint64_t)(md->len_iv & 0x3) <<
> +				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
> +		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
> +				1ULL : 0ULL) <<
> +				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
> +		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
> +
> +	/**
> +	 * TODO: Pre-calculate this in the Session initialization
> +	 *
> +	 * Calculate IPsec length required in data descriptor func when TSO
> +	 * offload is enabled
> +	 */
> +	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
> +			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
> +			sizeof(struct rte_udp_hdr) : 0);
> +}
> +
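As a worked example of the TODO above, assuming the upper bits of len_iv carry the IV size in bytes (so len_iv >> 2 recovers it), a NAT-T ESP flow with an 8-byte IV adds 8 (ESP header) + 8 (IV) + 8 (UDP header) = 24 bytes of extra L4 header, while the same flow without UDP encapsulation adds 16.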
>  static inline void
>  iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
>  		struct rte_mbuf *m)
> @@ -2298,6 +2438,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
>  }
> 
> 
> +static struct iavf_ipsec_crypto_pkt_metadata *
> +iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
> +		struct rte_mbuf *m)
> +{
> +	if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
> +		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
> +				struct iavf_ipsec_crypto_pkt_metadata *);
> +
> +	return NULL;
> +}
> +
>  /* TX function */
>  uint16_t
>  iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> @@ -2326,7 +2477,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> 
>  	for (idx = 0; idx < nb_pkts; idx++) {
>  		volatile struct iavf_tx_desc *ddesc;
> -		uint16_t nb_desc_ctx;
> +		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
> +
> +		uint16_t nb_desc_ctx, nb_desc_ipsec;
>  		uint16_t nb_desc_data, nb_desc_required;
>  		uint16_t tlen = 0, ipseclen = 0;
>  		uint64_t ddesc_template = 0;
> @@ -2336,16 +2489,23 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> 
>  		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
> 
> +		/**
> +		 * Get metadata for ipsec crypto from mbuf dynamic fields if
> +		 * security offload is specified.
> +		 */
> +		ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);
> +
>  		nb_desc_data = mb->nb_segs;
>  		nb_desc_ctx = !!(mb->ol_flags &
>  			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
>  			 RTE_MBUF_F_TX_TUNNEL_MASK));
> +		nb_desc_ipsec = !!(mb->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
> 
>  		/**
>  		 * The number of descriptors that must be allocated for
>  		 * a packet equals to the number of the segments of that
>  		 * packet plus the context and ipsec descriptors if needed.
>  		 */
> -		nb_desc_required = nb_desc_data + nb_desc_ctx;
> +		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
> 
>  		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
> 
> @@ -2396,7 +2556,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  				txe->mbuf = NULL;
>  			}
> 
> -			iavf_fill_context_desc(ctx_desc, mb, &tlen);
> +			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
>  			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
> 
>  			txe->last_id = desc_idx_last;
> @@ -2404,7 +2564,27 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  			txe = txn;
>  			}
> 
> +		if (nb_desc_ipsec) {
> +			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
> +				(volatile struct iavf_tx_ipsec_desc *)
> +					&txr[desc_idx];
> +
> +			txn = &txe_ring[txe->next_id];
> +			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
> 
> +			if (txe->mbuf) {
> +				rte_pktmbuf_free_seg(txe->mbuf);
> +				txe->mbuf = NULL;
> +			}
> +
> +			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
> +
> +			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
> +
> +			txe->last_id = desc_idx_last;
> +			desc_idx = txe->next_id;
> +			txe = txn;
> +		}
> 
>  		mb_seg = mb;
> 
> diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
> index 1da1278452..b88c81f8f6 100644
> --- a/drivers/net/iavf/iavf_rxtx.h
> +++ b/drivers/net/iavf/iavf_rxtx.h
> @@ -25,7 +25,8 @@
> 
>  #define IAVF_TX_NO_VECTOR_FLAGS (				 \
>  		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
> -		RTE_ETH_TX_OFFLOAD_TCP_TSO)
> +		RTE_ETH_TX_OFFLOAD_TCP_TSO |		 \
> +		RTE_ETH_TX_OFFLOAD_SECURITY)
> 
>  #define IAVF_TX_VECTOR_OFFLOAD (				 \
>  		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		 \
> @@ -36,10 +37,10 @@
>  		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
> 
>  #define IAVF_RX_VECTOR_OFFLOAD (				 \
> -		RTE_ETH_RX_OFFLOAD_CHECKSUM |		 \
> -		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		 \
> -		RTE_ETH_RX_OFFLOAD_VLAN |		 \
> -		RTE_ETH_RX_OFFLOAD_RSS_HASH)
> +		DEV_RX_OFFLOAD_CHECKSUM |		 \
> +		DEV_RX_OFFLOAD_SCTP_CKSUM |		 \
> +		DEV_RX_OFFLOAD_VLAN |		 \
> +		DEV_RX_OFFLOAD_RSS_HASH)
> 
>  #define IAVF_VECTOR_PATH 0
>  #define IAVF_VECTOR_OFFLOAD_PATH 1
> @@ -47,23 +48,26 @@
>  #define DEFAULT_TX_RS_THRESH     32
>  #define DEFAULT_TX_FREE_THRESH   32
> 
> -#define IAVF_MIN_TSO_MSS          88
> +#define IAVF_MIN_TSO_MSS          256
>  #define IAVF_MAX_TSO_MSS          9668
>  #define IAVF_TSO_MAX_SEG          UINT8_MAX
>  #define IAVF_TX_MAX_MTU_SEG       8
> 
> -#define IAVF_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM |	 \
> +#define IAVF_TX_CKSUM_OFFLOAD_MASK (		 \
> +		RTE_MBUF_F_TX_IP_CKSUM |		 \
>  		RTE_MBUF_F_TX_L4_MASK |		 \
>  		RTE_MBUF_F_TX_TCP_SEG)
> 
> -#define IAVF_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 |		 \
> +#define IAVF_TX_OFFLOAD_MASK (  \
> +		RTE_MBUF_F_TX_OUTER_IPV6 |		 \
>  		RTE_MBUF_F_TX_OUTER_IPV4 |		 \
>  		RTE_MBUF_F_TX_IPV6 |			 \
>  		RTE_MBUF_F_TX_IPV4 |			 \
>  		RTE_MBUF_F_TX_VLAN |		 \
>  		RTE_MBUF_F_TX_IP_CKSUM |		 \
>  		RTE_MBUF_F_TX_L4_MASK |		 \
> -		RTE_MBUF_F_TX_TCP_SEG)
> +		RTE_MBUF_F_TX_TCP_SEG |		 \
> +		RTE_MBUF_F_TX_SEC_OFFLOAD)
> 
>  #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
>  		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
> @@ -161,6 +165,24 @@ struct iavf_txq_ops {
>  	void (*release_mbufs)(struct iavf_tx_queue *txq);
>  };
> 
> +struct iavf_ipsec_crypto_stats {
> +	uint64_t icount;
> +	uint64_t ibytes;
> +	struct {
> +		uint64_t count;
> +		uint64_t sad_miss;
> +		uint64_t not_processed;
> +		uint64_t icv_check;
> +		uint64_t ipsec_length;
> +		uint64_t misc;
> +	} ierrors;
> +};
> +
> +struct iavf_rx_queue_stats {
> +	uint64_t reserved;
> +	struct iavf_ipsec_crypto_stats ipsec_crypto;
> +};
> +
>  /* Structure associated with each Rx queue. */
>  struct iavf_rx_queue {
>  	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
> @@ -209,6 +231,7 @@ struct iavf_rx_queue {
>  		/* flexible descriptor metadata extraction offload flag */
>  	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
>  				/* handle flexible descriptor by RXDID */
> +	struct iavf_rx_queue_stats stats;
>  	uint64_t offloads;
>  };
> 
> @@ -243,6 +266,7 @@ struct iavf_tx_queue {
>  	uint64_t offloads;
>  	uint16_t next_dd;              /* next to set RS, for VPMD */
>  	uint16_t next_rs;              /* next to check DD,  for VPMD */
> +	uint16_t ipsec_crypto_pkt_md_offset;
> 
>  	bool q_set;                    /* if rx queue has been configured */
>  	bool tx_deferred_start;        /* don't start this queue in dev start */
> @@ -345,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
>  	} flex_ts;
>  };
> 
> +/* Rx Flex Descriptor
> + * RxDID Profile ID 24 Inline IPsec
> + * Flex-field 0: RSS hash lower 16-bits
> + * Flex-field 1: RSS hash upper 16-bits
> + * Flex-field 2: Flow ID lower 16-bits
> + * Flex-field 3: Flow ID upper 16-bits
> + * Flex-field 4: Inline IPsec SAID lower 16-bits
> + * Flex-field 5: Inline IPsec SAID upper 16-bits
> + */
> +struct iavf_32b_rx_flex_desc_comms_ipsec {
> +	/* Qword 0 */
> +	u8 rxdid;
> +	u8 mir_id_umb_cast;
> +	__le16 ptype_flexi_flags0;
> +	__le16 pkt_len;
> +	__le16 hdr_len_sph_flex_flags1;
> +
> +	/* Qword 1 */
> +	__le16 status_error0;
> +	__le16 l2tag1;
> +	__le32 rss_hash;
> +
> +	/* Qword 2 */
> +	__le16 status_error1;
> +	u8 flexi_flags2;
> +	u8 ts_low;
> +	__le16 l2tag2_1st;
> +	__le16 l2tag2_2nd;
> +
> +	/* Qword 3 */
> +	__le32 flow_id;
> +	__le32 ipsec_said;
> +};
> +
>  /* Receive Flex Descriptor profile IDs: There are a total
>   * of 64 profiles where profile IDs 0/1 are for legacy; and
>   * profiles 2-63 are flex profiles that can be programmed
> @@ -364,6 +422,7 @@ enum iavf_rxdid {
>  	IAVF_RXDID_COMMS_AUX_TCP	= 21,
>  	IAVF_RXDID_COMMS_OVS_1		= 22,
>  	IAVF_RXDID_COMMS_OVS_2		= 23,
> +	IAVF_RXDID_COMMS_IPSEC_CRYPTO	= 24,
>  	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
>  	IAVF_RXDID_LAST			= 63,
>  };
> @@ -391,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
> 
>  enum iavf_rx_flex_desc_status_error_1_bits {
>  	/* Note: These are predefined bit offsets */
> -	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
> -	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
> -	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
> +	/* Bits 3:0 are reserved for inline ipsec status */
> +	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
> +	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
> +	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
> +	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
> +	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
> +	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
>  	/* [10:6] reserved */
>  	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
>  	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
> @@ -403,6 +466,23 @@ enum iavf_rx_flex_desc_status_error_1_bits {
>  	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
>  };
> 
> +#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
> +	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
> +	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
> +	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
> +	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
> +
> +enum iavf_rx_flex_desc_ipsec_crypto_status {
> +	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
> +	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
> +	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
> +	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
> +	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
> +	/* Reserved */
> +	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
> +};
> +
> +
> 
>  #define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
>  #define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
> @@ -670,6 +750,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
>  	case IAVF_TX_DESC_DTYPE_CONTEXT:
>  		name = "Tx_context_desc";
>  		break;
> +	case IAVF_TX_DESC_DTYPE_IPSEC:
> +		name = "Tx_IPsec_desc";
> +		break;
>  	default:
>  		name = "unknown_desc";
>  		break;
> diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
> index 53d1506677..353521d726 100644
> --- a/drivers/net/iavf/iavf_vchnl.c
> +++ b/drivers/net/iavf/iavf_vchnl.c
> @@ -1774,3 +1774,32 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
> 
>  	return 0;
>  }
> +
> +
> +
> +int
> +iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
> +		uint8_t *msg, size_t msg_len,
> +		uint8_t *resp_msg, size_t resp_msg_len)
> +{
> +	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> +	struct iavf_cmd_info args;
> +	int err;
> +
> +	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
> +	args.in_args = msg;
> +	args.in_args_size = msg_len;
> +	args.out_buffer = vf->aq_resp;
> +	args.out_size = IAVF_AQ_BUF_SZ;
> +
> +	err = iavf_execute_vf_cmd(adapter, &args, 1);
> +	if (err) {
> +		PMD_DRV_LOG(ERR, "fail to execute command %s",
> +				"OP_INLINE_IPSEC_CRYPTO");
> +		return err;
> +	}
> +
> +	memcpy(resp_msg, args.out_buffer, resp_msg_len);
> +
> +	return 0;
> +}
> diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
> index 36a82e3faa..5eb230f687 100644
> --- a/drivers/net/iavf/meson.build
> +++ b/drivers/net/iavf/meson.build
> @@ -5,7 +5,7 @@
>  cflags += ['-Wno-strict-aliasing']
> 
>  includes += include_directories('../../common/iavf')
> -deps += ['common_iavf']
> +deps += ['common_iavf', 'security', 'cryptodev']
> 
>  sources = files(
>          'iavf_ethdev.c',
> @@ -15,6 +15,7 @@ sources = files(
>          'iavf_fdir.c',
>          'iavf_hash.c',
>          'iavf_tm.c',
> +        'iavf_ipsec_crypto.c',
>  )
> 
>  if arch_subdir == 'x86'
> diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
> index 3a045040f1..7426eb9be3 100644
> --- a/drivers/net/iavf/rte_pmd_iavf.h
> +++ b/drivers/net/iavf/rte_pmd_iavf.h
> @@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
>  extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
>  extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
>  extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
> +extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
> 
>  /**
>   * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
> diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map
> index f3efe756cf..97f0f87311 100644
> --- a/drivers/net/iavf/version.map
> +++ b/drivers/net/iavf/version.map
> @@ -13,4 +13,7 @@ EXPERIMENTAL {
>  	rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
>  	rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
>  	rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
> +
> +	# added in 21.11
> +	rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
>  };
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v12 0/7] iavf: add iAVF IPsec inline crypto support
  2021-10-26 13:56 ` [dpdk-dev] [PATCH v12 " Radu Nicolau
                     ` (6 preceding siblings ...)
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
@ 2021-10-27  0:36   ` Zhang, Qi Z
  2021-10-28 14:47   ` Ferruh Yigit
  8 siblings, 0 replies; 128+ messages in thread
From: Zhang, Qi Z @ 2021-10-27  0:36 UTC (permalink / raw)
  To: Nicolau, Radu
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Wu, Jingjing, Xing, Beilei,
	Richardson, Bruce, Ananyev, Konstantin



> -----Original Message-----
> From: Nicolau, Radu <radu.nicolau@intel.com>
> Sent: Tuesday, October 26, 2021 9:57 PM
> Cc: dev@dpdk.org; Doherty, Declan <declan.doherty@intel.com>; Sinha,
> Abhijit <abhijit.sinha@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Zhang,
> Qi Z <qi.z.zhang@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Richardson,
> Bruce <bruce.richardson@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Nicolau, Radu <radu.nicolau@intel.com>
> Subject: [PATCH v12 0/7] iavf: add iAVF IPsec inline crypto support
> 
> Add support for inline crypto for IPsec, for ESP transport and tunnel over IPv4
> and IPv6, as well as supporting the offload for ESP over UDP, and in conjunction
> with TSO for UDP and TCP flows.
> 
> Radu Nicolau (7):
>   common/iavf: add iAVF IPsec inline crypto support
>   net/iavf: rework tx path
>   net/iavf: add support for asynchronous virt channel messages
>   net/iavf: add iAVF IPsec inline crypto support
>   net/iavf: add xstats support for inline IPsec crypto
>   net/iavf: add watchdog for VFLR
>   net/iavf: update doc with inline crypto support
> 
>  doc/guides/nics/features/iavf.ini             |    2 +
>  doc/guides/nics/intel_vf.rst                  |   10 +
>  doc/guides/rel_notes/release_21_11.rst        |    1 +
>  drivers/common/iavf/iavf_type.h               |    1 +
>  drivers/common/iavf/virtchnl.h                |   17 +-
>  drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
>  drivers/net/iavf/iavf.h                       |   52 +-
>  drivers/net/iavf/iavf_ethdev.c                |  219 +-
>  drivers/net/iavf/iavf_generic_flow.c          |   15 +
>  drivers/net/iavf/iavf_generic_flow.h          |    2 +
>  drivers/net/iavf/iavf_ipsec_crypto.c          | 1894 +++++++++++++++++
>  drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
>  .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
>  drivers/net/iavf/iavf_rxtx.c                  |  710 ++++--
>  drivers/net/iavf/iavf_rxtx.h                  |  212 +-
>  drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
>  drivers/net/iavf/iavf_vchnl.c                 |  167 +-
>  drivers/net/iavf/meson.build                  |    3 +-
>  drivers/net/iavf/rte_pmd_iavf.h               |    1 +
>  drivers/net/iavf/version.map                  |    3 +
>  20 files changed, 4098 insertions(+), 317 deletions(-)  create mode 100644
> drivers/common/iavf/virtchnl_inline_ipsec.h
>  create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
>  create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
>  create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
> 
> --
> v2: small updates and fixes in the flow related section
> v3: split the huge patch and address feedback
> v4: small changes due to dependencies changes
> v5: updated the watchdog patch
> v6: rebased and updated the common section
> v7: fixed TSO issue and disabled watchdog by default
> v8: rebased to next-net-intel and added doc updates
> v9: fixed IV len for AEAD and GMAC
> v10: removed blank lines at EOF
> v11: rebased patchset
> v12: rebased patchset to RC1
> 
> 2.25.1

Applied to dpdk-next-net-intel.

Thanks
Qi


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v12 2/7] net/iavf: rework tx path
  2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 2/7] net/iavf: rework tx path Radu Nicolau
@ 2021-10-27  0:43     ` Zhang, Qi Z
  0 siblings, 0 replies; 128+ messages in thread
From: Zhang, Qi Z @ 2021-10-27  0:43 UTC (permalink / raw)
  To: Nicolau, Radu, Wu, Jingjing, Xing, Beilei, Richardson,  Bruce,
	Ananyev, Konstantin
  Cc: dev, Doherty, Declan, Sinha, Abhijit



> -----Original Message-----
> From: Nicolau, Radu <radu.nicolau@intel.com>
> Sent: Tuesday, October 26, 2021 9:57 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>;
> Richardson, Bruce <bruce.richardson@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>
> Cc: dev@dpdk.org; Doherty, Declan <declan.doherty@intel.com>; Sinha,
> Abhijit <abhijit.sinha@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Nicolau,
> Radu <radu.nicolau@intel.com>
> Subject: [PATCH v12 2/7] net/iavf: rework tx path
> 
> Rework the TX path and TX descriptor usage in order to allow for better use of
> offload flags and to facilitate enabling of the inline crypto offload feature.
> 
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
> Acked-by: Jingjing Wu <jingjing.wu@intel.com>
> ---
>  drivers/net/iavf/iavf_rxtx.c         | 538 ++++++++++++++++-----------
>  drivers/net/iavf/iavf_rxtx.h         | 117 +++++-
>  drivers/net/iavf/iavf_rxtx_vec_sse.c |  10 +-
>  3 files changed, 431 insertions(+), 234 deletions(-)
> 
> diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index
> 52d919ca1b..128691aaf1 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -1054,27 +1054,31 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb,
> volatile union iavf_rx_desc *rxdp)
> 
>  static inline void
>  iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
> -			  volatile union iavf_rx_flex_desc *rxdp,
> -			  uint8_t rx_flags)
> +			  volatile union iavf_rx_flex_desc *rxdp)
>  {
> -	uint16_t vlan_tci = 0;
> -
> -	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
> -	    rte_le_to_cpu_64(rxdp->wb.status_error0) &
> -	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
> -		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
> +	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
> +		(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
> +		mb->ol_flags |= RTE_MBUF_F_RX_VLAN |
> RTE_MBUF_F_RX_VLAN_STRIPPED;
> +		mb->vlan_tci =
> +			rte_le_to_cpu_16(rxdp->wb.l2tag1);
> +	} else {
> +		mb->vlan_tci = 0;
> +	}
> 
>  #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
> -	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
> -	    rte_le_to_cpu_16(rxdp->wb.status_error1) &
> -	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
> -		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
> -#endif
> -
> -	if (vlan_tci) {
> -		mb->ol_flags |= RTE_MBUF_F_RX_VLAN |
> RTE_MBUF_F_RX_VLAN_STRIPPED;
> -		mb->vlan_tci = vlan_tci;
> +	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
> +	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
> +		mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED |
> RTE_MBUF_F_RX_QINQ |
> +				RTE_MBUF_F_RX_VLAN_STRIPPED |
> RTE_MBUF_F_RX_VLAN;
> +		mb->vlan_tci_outer = mb->vlan_tci;
> +		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
> +		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
> +			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
> +			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
> +	} else {
> +		mb->vlan_tci_outer = 0;
>  	}
> +#endif
>  }
> 
>  /* Translate the rx descriptor status and error fields to pkt flags */ @@
> -1394,7 +1398,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
>  		rxm->ol_flags = 0;
>  		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
>  			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> -		iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
> +		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
>  		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
>  		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
>  		rxm->ol_flags |= pkt_flags;
> @@ -1536,7 +1540,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue,
> struct rte_mbuf **rx_pkts,
>  		first_seg->ol_flags = 0;
>  		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M
> &
>  			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> -		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
> +		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
>  		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
>  		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
> 
> @@ -1774,7 +1778,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct
> iavf_rx_queue *rxq)
> 
>  			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
>  				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
> -			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
> +			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
>  			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
>  			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
>  			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
> @@ -2068,190 +2072,302 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
>  	return 0;
>  }
> 
> -/* Check if the context descriptor is needed for TX offloading */
> +
> +
> +static inline void
> +iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf
> +*m) {
> +	uint64_t cmd = 0;
> +
> +	/* TSO enabled */
> +	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
> RTE_MBUF_F_TX_UDP_SEG))
> +		cmd = IAVF_TX_CTX_DESC_TSO <<
> IAVF_TXD_DATA_QW1_CMD_SHIFT;
> +
> +	/* Time Sync - Currently not supported */
> +
> +	/* Outer L2 TAG 2 Insertion - Currently not supported */
> +	/* Inner L2 TAG 2 Insertion - Currently not supported */
> +
> +	*field |= cmd;
> +}
> +
> +static inline void
> +iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
> +		const struct rte_mbuf *m)
> +{
> +	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
> +	uint64_t eip_len = 0;
> +	uint64_t eip_noinc = 0;
> +	/* Default - IP_ID is increment in each segment of LSO */
> +
> +	switch (m->ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 |
> RTE_MBUF_F_TX_OUTER_IPV6 |
> +			RTE_MBUF_F_TX_OUTER_IP_CKSUM)) {
> +	case RTE_MBUF_F_TX_OUTER_IPV4:
> +		eip_typ =
> IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
> +		eip_len = m->outer_l3_len >> 2;
> +	break;
> +	case RTE_MBUF_F_TX_OUTER_IPV4 |
> RTE_MBUF_F_TX_OUTER_IP_CKSUM:
> +		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
> +		eip_len = m->outer_l3_len >> 2;
> +	break;
> +	case RTE_MBUF_F_TX_OUTER_IPV6:
> +		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
> +		eip_len = m->outer_l3_len >> 2;
> +	break;
> +	}
> +
> +	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
> +		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
> +		eip_noinc <<
> IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
> +}
> +
>  static inline uint16_t
> -iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
> +iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
> +	struct rte_mbuf *m)
>  {
> -	if (flags & RTE_MBUF_F_TX_TCP_SEG)
> -		return 1;
> -	if (flags & RTE_MBUF_F_TX_VLAN &&
> -	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
> -		return 1;
> -	return 0;
> +	uint64_t segmentation_field = 0;
> +	uint64_t total_length = 0;
> +
> +	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
> +
> +	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
> +		total_length -= m->outer_l3_len;
> +
> +#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
> +	if (!m->l4_len || !m->tso_segsz)
> +		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
> +			 m->l4_len, m->tso_segsz);
> +	if (m->tso_segsz < 88)
> +		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than
> minimum %d",
> +			m->tso_segsz, 88);
> +#endif
> +	segmentation_field =
> +		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
> +				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
> +		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
> +				IAVF_TXD_CTX_QW1_MSS_MASK);
> +
> +	*field |= segmentation_field;
> +
> +	return total_length;
>  }
> 
> +
> +struct iavf_tx_context_desc_qws {
> +	__le64 qw0;
> +	__le64 qw1;
> +};
> +
>  static inline void
> -iavf_txd_enable_checksum(uint64_t ol_flags,
> -			uint32_t *td_cmd,
> -			uint32_t *td_offset,
> -			union iavf_tx_offload tx_offload)
> +iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
> +	struct rte_mbuf *m, uint16_t *tlen)
>  {
> +	volatile struct iavf_tx_context_desc_qws *desc_qws =
> +			(volatile struct iavf_tx_context_desc_qws *)desc;
> +	/* fill descriptor type field */
> +	desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
> +
> +	/* fill command field */
> +	iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m);
> +
> +	/* fill segmentation field */
> +	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
> RTE_MBUF_F_TX_UDP_SEG)) {
> +		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
> +				m);
> +	}
> +
> +	/* fill tunnelling field */
> +	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
> +		iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
> +	else
> +		desc_qws->qw0 = 0;
> +
> +	desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
> +	desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1); }
> +
> +
> +static inline void
> +iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
> +		struct rte_mbuf *m)
> +{
> +	uint64_t command = 0;
> +	uint64_t offset = 0;
> +	uint64_t l2tag1 = 0;
> +
> +	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
> +
> +	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
> +
> +	/* Descriptor based VLAN insertion */
> +	if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
> +		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
> +		l2tag1 |= m->vlan_tci;
> +	}
> +
>  	/* Set MACLEN */
> -	*td_offset |= (tx_offload.l2_len >> 1) <<
> -		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
> -
> -	/* Enable L3 checksum offloads */
> -	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
> -		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
> -		*td_offset |= (tx_offload.l3_len >> 2) <<
> -			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
> -	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
> -		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
> -		*td_offset |= (tx_offload.l3_len >> 2) <<
> -			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
> -	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
> -		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
> -		*td_offset |= (tx_offload.l3_len >> 2) <<
> -			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
> -	}
> -
> -	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
> -		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
> -		*td_offset |= (tx_offload.l4_len >> 2) <<
> +	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
> +
> +	/* Enable L3 checksum offloading inner */
> +	if (m->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4))
> {
> +		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
> +		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
> +	} else if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
> +		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
> +		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
> +	} else if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
> +		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
> +		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
> +	}
> +
> +	if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
> +		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
> +		offset |= (m->l4_len >> 2) <<
>  			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
> -		return;
>  	}
> 
>  	/* Enable L4 checksum offloads */
> -	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
> +	switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
>  	case RTE_MBUF_F_TX_TCP_CKSUM:
> -		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
> -		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
> -			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
> +		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
> +		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
> +				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
>  		break;
>  	case RTE_MBUF_F_TX_SCTP_CKSUM:
> -		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
> -		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
> -			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
> +		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
> +		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
> +				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
>  		break;
>  	case RTE_MBUF_F_TX_UDP_CKSUM:
> -		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
> -		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
> -			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
> -		break;
> -	default:
> +		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
> +		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
> +				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
>  		break;
>  	}
> +
> +	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
> +		IAVF_TXD_DATA_QW1_CMD_SHIFT) &
> IAVF_TXD_DATA_QW1_CMD_MASK) |
> +		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
> +		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
> +		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
>  }
> 
> -/* set TSO context descriptor
> - * support IP -> L4 and IP -> IP -> L4
> - */
> -static inline uint64_t
> -iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
> +static inline void
> +iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t
> +value)

The static function above is never used; it was removed during merge.

>  {
> -	uint64_t ctx_desc = 0;
> -	uint32_t cd_cmd, hdr_len, cd_tso_len;
> -
> -	if (!tx_offload.l4_len) {
> -		PMD_TX_LOG(DEBUG, "L4 length set to 0");
> -		return ctx_desc;
> +	*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
> +			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
>  	}
> 
> -	hdr_len = tx_offload.l2_len +
> -		  tx_offload.l3_len +
> -		  tx_offload.l4_len;
> +static inline void
> +iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
> +	struct rte_mbuf *m, uint64_t desc_template,
> +	uint16_t tlen, uint16_t ipseclen)
> +{
> +	uint32_t hdrlen = m->l2_len;
> +	uint32_t bufsz = 0;
> 
> -	cd_cmd = IAVF_TX_CTX_DESC_TSO;
> -	cd_tso_len = mbuf->pkt_len - hdr_len;
> -	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
> -		     ((uint64_t)cd_tso_len <<
> IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
> -		     ((uint64_t)mbuf->tso_segsz <<
> IAVF_TXD_CTX_QW1_MSS_SHIFT);
> +	/* fill data descriptor qw1 from template */
> +	desc->cmd_type_offset_bsz = desc_template;
> 
> -	return ctx_desc;
> -}
> +	/* set data buffer address */
> +	desc->buffer_addr = rte_mbuf_data_iova(m);
> 
> -/* Construct the tx flags */
> -static inline uint64_t
> -iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
> -	       uint32_t td_tag)
> -{
> -	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
> -				((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
> -				((uint64_t)td_offset <<
> -				 IAVF_TXD_QW1_OFFSET_SHIFT) |
> -				((uint64_t)size  <<
> -				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
> -				((uint64_t)td_tag  <<
> -				 IAVF_TXD_QW1_L2TAG1_SHIFT));
> +	/* calculate data buffer size less set header lengths */
> +	if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
> +			(m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
> RTE_MBUF_F_TX_UDP_SEG))) {
> +		hdrlen += m->outer_l3_len;
> +		if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
> +			hdrlen += m->l3_len + m->l4_len;
> +		else
> +			hdrlen += m->l3_len;
> +		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
> +			hdrlen += ipseclen;
> +		bufsz = hdrlen + tlen;
> +	} else {
> +		bufsz = m->data_len;
> +	}
> +
> +	/* set data buffer size */
> +	desc->cmd_type_offset_bsz |=
> +		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
> +		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
> +
> +	desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
> +	desc->cmd_type_offset_bsz =
> +rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
>  }
> 
> +
>  /* TX function */
>  uint16_t
>  iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> {
> -	volatile struct iavf_tx_desc *txd;
> -	volatile struct iavf_tx_desc *txr;
> -	struct iavf_tx_queue *txq;
> -	struct iavf_tx_entry *sw_ring;
> +	struct iavf_tx_queue *txq = tx_queue;
> +	volatile struct iavf_tx_desc *txr = txq->tx_ring;
> +	struct iavf_tx_entry *txe_ring = txq->sw_ring;
>  	struct iavf_tx_entry *txe, *txn;
> -	struct rte_mbuf *tx_pkt;
> -	struct rte_mbuf *m_seg;
> -	uint16_t tx_id;
> -	uint16_t nb_tx;
> -	uint32_t td_cmd;
> -	uint32_t td_offset;
> -	uint32_t td_tag;
> -	uint64_t ol_flags;
> -	uint16_t nb_used;
> -	uint16_t nb_ctx;
> -	uint16_t tx_last;
> -	uint16_t slen;
> -	uint64_t buf_dma_addr;
> -	uint16_t cd_l2tag2 = 0;
> -	union iavf_tx_offload tx_offload = {0};
> -
> -	txq = tx_queue;
> -	sw_ring = txq->sw_ring;
> -	txr = txq->tx_ring;
> -	tx_id = txq->tx_tail;
> -	txe = &sw_ring[tx_id];
> +	struct rte_mbuf *mb, *mb_seg;
> +	uint16_t desc_idx, desc_idx_last;
> +	uint16_t idx;
> +
> 
>  	/* Check if the descriptor ring needs to be cleaned. */
>  	if (txq->nb_free < txq->free_thresh)
> -		(void)iavf_xmit_cleanup(txq);
> +		iavf_xmit_cleanup(txq);
> +
> +	desc_idx = txq->tx_tail;
> +	txe = &txe_ring[desc_idx];
> +
> +#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
> +		iavf_dump_tx_entry_ring(txq);
> +		iavf_dump_tx_desc_ring(txq);
> +#endif
> +
> 
> -	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
> -		td_cmd = 0;
> -		td_tag = 0;
> -		td_offset = 0;
> +	for (idx = 0; idx < nb_pkts; idx++) {
> +		volatile struct iavf_tx_desc *ddesc;
> +		uint16_t nb_desc_ctx;
> +		uint16_t nb_desc_data, nb_desc_required;
> +		uint16_t tlen = 0, ipseclen = 0;
> +		uint64_t ddesc_template = 0;
> +		uint64_t ddesc_cmd = 0;
> +
> +		mb = tx_pkts[idx];
> 
> -		tx_pkt = *tx_pkts++;
>  		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
> 
> -		ol_flags = tx_pkt->ol_flags;
> -		tx_offload.l2_len = tx_pkt->l2_len;
> -		tx_offload.l3_len = tx_pkt->l3_len;
> -		tx_offload.l4_len = tx_pkt->l4_len;
> -		tx_offload.tso_segsz = tx_pkt->tso_segsz;
> -		/* Calculate the number of context descriptors needed. */
> -		nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
> +		nb_desc_data = mb->nb_segs;
> +		nb_desc_ctx = !!(mb->ol_flags &
> +			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
> +RTE_MBUF_F_TX_TUNNEL_MASK));
> 
> -		/* The number of descriptors that must be allocated for
> +		/**
> +		 * The number of descriptors that must be allocated for
>  		 * a packet equals to the number of the segments of that
> -		 * packet plus 1 context descriptor if needed.
> +		 * packet plus the context and ipsec descriptors if needed.
>  		 */
> -		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
> -		tx_last = (uint16_t)(tx_id + nb_used - 1);
> +		nb_desc_required = nb_desc_data + nb_desc_ctx;
> +
> +		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
> 
> -		/* Circular ring */
> -		if (tx_last >= txq->nb_tx_desc)
> -			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
> +		/* wrap descriptor ring */
> +		if (desc_idx_last >= txq->nb_tx_desc)
> +			desc_idx_last =
> +				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
> 
> -		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
> -			   " tx_first=%u tx_last=%u",
> -			   txq->port_id, txq->queue_id, tx_id, tx_last);
> +		PMD_TX_LOG(DEBUG,
> +			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
> +			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
> 
> -		if (nb_used > txq->nb_free) {
> +		if (nb_desc_required > txq->nb_free) {
>  			if (iavf_xmit_cleanup(txq)) {
> -				if (nb_tx == 0)
> +				if (idx == 0)
>  					return 0;
>  				goto end_of_tx;
>  			}
> -			if (unlikely(nb_used > txq->rs_thresh)) {
> -				while (nb_used > txq->nb_free) {
> +			if (unlikely(nb_desc_required > txq->rs_thresh)) {
> +				while (nb_desc_required > txq->nb_free) {
>  					if (iavf_xmit_cleanup(txq)) {
> -						if (nb_tx == 0)
> +						if (idx == 0)
>  							return 0;
>  						goto end_of_tx;
>  					}
> @@ -2259,122 +2375,94 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts, uint16_t nb_pkts)
>  			}
>  		}
> 
> -		/* Descriptor based VLAN insertion */
> -		if (ol_flags & RTE_MBUF_F_TX_VLAN &&
> -		    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
> -			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
> -			td_tag = tx_pkt->vlan_tci;
> -		}
> -
> -		/* According to datasheet, the bit2 is reserved and must be
> -		 * set to 1.
> -		 */
> -		td_cmd |= 0x04;
> -
> -		/* Enable checksum offloading */
> -		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
> -			iavf_txd_enable_checksum(ol_flags, &td_cmd,
> -						&td_offset, tx_offload);
> +		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
> 
> -		if (nb_ctx) {
>  			/* Setup TX context descriptor if required */
> -			uint64_t cd_type_cmd_tso_mss =
> -				IAVF_TX_DESC_DTYPE_CONTEXT;
> -			volatile struct iavf_tx_context_desc *ctx_txd =
> +		if (nb_desc_ctx) {
> +			volatile struct iavf_tx_context_desc *ctx_desc =
>  				(volatile struct iavf_tx_context_desc *)
> -							&txr[tx_id];
> +					&txr[desc_idx];
> 
>  			/* clear QW0 or the previous writeback value
>  			 * may impact next write
>  			 */
> -			*(volatile uint64_t *)ctx_txd = 0;
> +			*(volatile uint64_t *)ctx_desc = 0;
> 
> -			txn = &sw_ring[txe->next_id];
> +			txn = &txe_ring[txe->next_id];
>  			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
> +
>  			if (txe->mbuf) {
>  				rte_pktmbuf_free_seg(txe->mbuf);
>  				txe->mbuf = NULL;
>  			}
> 
> -			/* TSO enabled */
> -			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
> -				cd_type_cmd_tso_mss |=
> -					iavf_set_tso_ctx(tx_pkt, tx_offload);
> +			iavf_fill_context_desc(ctx_desc, mb, &tlen);
> +			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
> 
> -			if (ol_flags & RTE_MBUF_F_TX_VLAN &&
> -			    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
> {
> -				cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
> -					<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
> -				cd_l2tag2 = tx_pkt->vlan_tci;
> +			txe->last_id = desc_idx_last;
> +			desc_idx = txe->next_id;
> +			txe = txn;
>  			}
> 
> -			ctx_txd->type_cmd_tso_mss =
> -				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
> -			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
> 
> -			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
> -			txe->last_id = tx_last;
> -			tx_id = txe->next_id;
> -			txe = txn;
> -		}
> 
> -		m_seg = tx_pkt;
> +		mb_seg = mb;
> +
>  		do {
> -			txd = &txr[tx_id];
> -			txn = &sw_ring[txe->next_id];
> +			ddesc = (volatile struct iavf_tx_desc *)
> +					&txr[desc_idx];
> +
> +			txn = &txe_ring[txe->next_id];
> +			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
> 
>  			if (txe->mbuf)
>  				rte_pktmbuf_free_seg(txe->mbuf);
> -			txe->mbuf = m_seg;
> -
> -			/* Setup TX Descriptor */
> -			slen = m_seg->data_len;
> -			buf_dma_addr = rte_mbuf_data_iova(m_seg);
> -			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
> -			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
> -								  td_offset,
> -								  slen,
> -								  td_tag);
> -
> -			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
> -			txe->last_id = tx_last;
> -			tx_id = txe->next_id;
> +
> +			txe->mbuf = mb_seg;
> +			iavf_fill_data_desc(ddesc, mb_seg,
> +					ddesc_template, tlen, ipseclen);
> +
> +			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
> +
> +			txe->last_id = desc_idx_last;
> +			desc_idx = txe->next_id;
>  			txe = txn;
> -			m_seg = m_seg->next;
> -		} while (m_seg);
> +			mb_seg = mb_seg->next;
> +		} while (mb_seg);
> 
>  		/* The last packet data descriptor needs End Of Packet (EOP) */
> -		td_cmd |= IAVF_TX_DESC_CMD_EOP;
> -		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
> -		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
> +		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
> +
> +		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
> +		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
> 
>  		if (txq->nb_used >= txq->rs_thresh) {
>  			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
>  				   "%4u (port=%d queue=%d)",
> -				   tx_last, txq->port_id, txq->queue_id);
> +				   desc_idx_last, txq->port_id, txq->queue_id);
> 
> -			td_cmd |= IAVF_TX_DESC_CMD_RS;
> +			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
> 
>  			/* Update txq RS bit counters */
>  			txq->nb_used = 0;
>  		}
> 
> -		txd->cmd_type_offset_bsz |=
> -			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
> -					 IAVF_TXD_QW1_CMD_SHIFT);
> -		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
> +		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
> +				IAVF_TXD_DATA_QW1_CMD_SHIFT);
> +
> +		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
>  	}
> 
>  end_of_tx:
>  	rte_wmb();
> 
>  	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
> -		   txq->port_id, txq->queue_id, tx_id, nb_tx);
> +		   txq->port_id, txq->queue_id, desc_idx, idx);
> 
> -	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
> -	txq->tx_tail = tx_id;
> +	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
> +	txq->tx_tail = desc_idx;
> 
> -	return nb_tx;
> +	return idx;
>  }
> 
>  /* Check if the packet with vlan user priority is transmitted in the diff --git
> a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h index
> 84351011f1..1da1278452 100644
> --- a/drivers/net/iavf/iavf_rxtx.h
> +++ b/drivers/net/iavf/iavf_rxtx.h
> @@ -403,6 +403,112 @@ enum iavf_rx_flex_desc_status_error_1_bits {
>  	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */  };
> 
> +
> +#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
> +#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL <<
> IAVF_TXD_QW1_DTYPE_SHIFT)
> +
> +#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
> +#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL <<
> IAVF_TXD_DATA_QW1_CMD_SHIFT)
> +
> +#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
> +#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
> +					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
> +
> +#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT
> 	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
> +#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
> +	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)
> +
> +#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
> +	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT +
> IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
> +#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
> +	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)
> +
> +#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
> +	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT +
> IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
> +#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
> +	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)
> +
> +#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
> +	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
> +#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
> +	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
> +#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
> +	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
> +#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
> +	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
> +
> +#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
> +#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
> +	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)
> +
> +#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT		(48)
> +#define IAVF_TXD_DATA_QW1_L2TAG1_MASK		\
> +	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
> +
> +#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
> +#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
> +	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)
> +
> +#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
> +#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
> +	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)
> +
> +#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT		(30)
> +#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK		\
> +	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
> +
> +#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
> +#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK		\
> +	(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
> +
> +#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT		(50)
> +#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK		\
> +	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
> +
> +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT		(0)
> +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK		(0x3UL)
> +
> +enum iavf_tx_ctx_desc_tunnel_external_ip_type {
> +	IAVF_TX_CTX_DESC_EIPT_NONE,
> +	IAVF_TX_CTX_DESC_EIPT_IPV6,
> +	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
> +	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
> +};
> +
> +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
> +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)
> +
> +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
> +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)
> +
> +enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
> +	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
> +	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
> +	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
> +};
> +
> +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
> +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)
> +
> +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
> +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)
> +
> +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
> +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)
> +
> +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
> +#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)
> +
> +#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM			(32)
> +#define IAVF_TXD_CTX_QW0_L2TAG2_MASK			(0xFFFFUL)
> +
> +
> +#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)
> +
> +/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
> +#define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
> +
> +
>  /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
>  #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
> 
> @@ -553,9 +659,10 @@ void iavf_dump_tx_descriptor(const struct
> iavf_tx_queue *txq,
>  	const volatile struct iavf_tx_desc *tx_desc = desc;
>  	enum iavf_tx_desc_dtype_value type;
> 
> -	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
> -		tx_desc->cmd_type_offset_bsz &
> -		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
> +
> +	type = (enum iavf_tx_desc_dtype_value)
> +		rte_le_to_cpu_64(tx_desc->cmd_type_offset_bsz &
> +			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
>  	switch (type) {
>  	case IAVF_TX_DESC_DTYPE_DATA:
>  		name = "Tx_data_desc";
> @@ -569,8 +676,8 @@ void iavf_dump_tx_descriptor(const struct
> iavf_tx_queue *txq,
>  	}
> 
>  	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1:
> 0x%016"PRIx64"\n",
> -	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
> -	       tx_desc->cmd_type_offset_bsz);
> +		txq->queue_id, name, tx_id, tx_desc->buffer_addr,
> +		tx_desc->cmd_type_offset_bsz);
>  }
> 
>  #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \ diff --git
> a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
> index d4f4d705b7..6d42ae9373 100644
> --- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
> +++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
> @@ -363,10 +363,12 @@ static inline void  flex_desc_to_ptype_v(__m128i
> descs[4], struct rte_mbuf **rx_pkts,
>  		     const uint32_t *type_table)
>  {
> -	const __m128i ptype_mask = _mm_set_epi16(0,
> IAVF_RX_FLEX_DESC_PTYPE_M,
> -						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
> -						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
> -						 0, IAVF_RX_FLEX_DESC_PTYPE_M);
> +	const __m128i ptype_mask = _mm_set_epi16(
> +					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
> +					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
> +					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
> +					IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
> +
>  	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
>  	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
>  	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v12 0/7] iavf: add iAVF IPsec inline crypto support
  2021-10-26 13:56 ` [dpdk-dev] [PATCH v12 " Radu Nicolau
                     ` (7 preceding siblings ...)
  2021-10-27  0:36   ` [dpdk-dev] [PATCH v12 0/7] iavf: add iAVF IPsec " Zhang, Qi Z
@ 2021-10-28 14:47   ` Ferruh Yigit
  8 siblings, 0 replies; 128+ messages in thread
From: Ferruh Yigit @ 2021-10-28 14:47 UTC (permalink / raw)
  To: Radu Nicolau
  Cc: dev, declan.doherty, abhijit.sinha, jingjing.wu, qi.z.zhang,
	beilei.xing, bruce.richardson, konstantin.ananyev, Phil Yang,
	Honnappa Nagarahalli

On 10/26/2021 2:56 PM, Radu Nicolau wrote:
> Add support for inline crypto for IPsec, for ESP transport and
> tunnel over IPv4 and IPv6, as well as supporting the offload for
> ESP over UDP, and in conjunction with TSO for UDP and TCP flows.
> 
> Radu Nicolau (7):
>    common/iavf: add iAVF IPsec inline crypto support
>    net/iavf: rework tx path
>    net/iavf: add support for asynchronous virt channel messages
>    net/iavf: add iAVF IPsec inline crypto support
>    net/iavf: add xstats support for inline IPsec crypto
>    net/iavf: add watchdog for VFLR
>    net/iavf: update doc with inline crypto support
> 

Hi Radu,

'./devtools/checkpatches.sh' complains about 'rte_atomicNN_xxx' usage:

Warning in drivers/net/iavf/iavf.h:
Using rte_atomicNN_xxx


We prefer the C11-style atomic builtins ('__atomic_*') in new code;
can you please update to them?

There is some documentation around it, although it doesn't have much detail:
https://doc.dpdk.org/guides/prog_guide/writing_efficient_code.html#locks-and-atomic-operations
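
For illustration, here is a minimal sketch of the kind of conversion being
asked for; the struct and field names below are made up for this example and
are not taken from the patchset:

	#include <stdint.h>

	struct example_vf_state {
		/* was: rte_atomic32_t vf_reset; */
		uint32_t vf_reset;
	};

	/* was: rte_atomic32_set(&vf->vf_reset, 1); */
	static inline void
	example_signal_vf_reset(struct example_vf_state *vf)
	{
		__atomic_store_n(&vf->vf_reset, 1, __ATOMIC_RELEASE);
	}

	/* was: rte_atomic32_read(&vf->vf_reset); */
	static inline uint32_t
	example_vf_reset_pending(struct example_vf_state *vf)
	{
		return __atomic_load_n(&vf->vf_reset, __ATOMIC_ACQUIRE);
	}

The exact memory ordering depends on what the flag synchronizes with;
__ATOMIC_RELAXED is sufficient if it is only a standalone state flag.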


And since you will send a new version, there are a few warnings from
'./devtools/checkpatches.sh' and './devtools/check-git-log.sh'; can you please
check them too?

Thanks,
ferruh

^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec inline crypto support
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (14 preceding siblings ...)
  2021-10-26 13:56 ` [dpdk-dev] [PATCH v12 " Radu Nicolau
@ 2021-10-28 15:52 ` Radu Nicolau
  2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 1/7] common/iavf: " Radu Nicolau
                     ` (6 more replies)
  2021-10-28 16:04 ` [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec " Radu Nicolau
  16 siblings, 7 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 15:52 UTC (permalink / raw)
  Cc: dev, declan.doherty, abhijit.sinha, jingjing.wu, qi.z.zhang,
	beilei.xing, bruce.richardson, konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload for
ESP over UDP, and in conjunction with TSO for UDP and TCP flows.

Radu Nicolau (7):
  common/iavf: add iAVF IPsec inline crypto support
  net/iavf: rework tx path
  net/iavf: add support for asynchronous virt channel messages
  net/iavf: add iAVF IPsec inline crypto support
  net/iavf: add xstats support for inline IPsec crypto
  net/iavf: add watchdog for VFLR
  net/iavf: update doc with inline crypto support

 doc/guides/nics/features/iavf.ini             |    2 +
 doc/guides/nics/intel_vf.rst                  |   10 +
 doc/guides/rel_notes/release_21_11.rst        |    1 +
 drivers/common/iavf/iavf_type.h               |    1 +
 drivers/common/iavf/virtchnl.h                |   17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
 drivers/net/iavf/iavf.h                       |   61 +-
 drivers/net/iavf/iavf_ethdev.c                |  219 +-
 drivers/net/iavf/iavf_generic_flow.c          |   15 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1894 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  716 +++++--
 drivers/net/iavf/iavf_rxtx.h                  |  212 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  169 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 20 files changed, 4113 insertions(+), 319 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

-- 
v2: small updates and fixes in the flow related section
v3: split the huge patch and address feedback
v4: small changes due to dependencies changes
v5: updated the watchdog patch
v6: rebased and updated the common section
v7: fixed TSO issue and disabled watchdog by default
v8: rebased to next-net-intel and added doc updates
v9: fixed IV len for AEAD and GMAC
v10: removed blank lines at EOF
v11: rebased patchset
v12: rebased patchset to RC1
v13: fixed coding style issues

2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v13 1/7] common/iavf: add iAVF IPsec inline crypto support
  2021-10-28 15:52 ` [dpdk-dev] [PATCH v13 " Radu Nicolau
@ 2021-10-28 15:52   ` Radu Nicolau
  2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 2/7] net/iavf: rework tx path Radu Nicolau
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 15:52 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/common/iavf/iavf_type.h             |   1 +
 drivers/common/iavf/virtchnl.h              |  17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h | 553 ++++++++++++++++++++
 3 files changed, 569 insertions(+), 2 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 73dfb47e70..51267ca3b3 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -723,6 +723,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 067f715945..269578f7c0 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -38,6 +38,8 @@
  * value in current and future projects
  */
 
+#include "virtchnl_inline_ipsec.h"
+
 /* Error Codes */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
@@ -133,7 +135,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, and 37 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
 	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
 	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
@@ -225,6 +228,8 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 		return "VIRTCHNL_OP_DCF_CMD_DESC";
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
@@ -385,7 +390,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
-	/* BIT(8) is reserved */
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
@@ -2291,6 +2296,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..1e9134501e
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all valid, if certain field is invalid, set 1 for all bits */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* vf record the capability of crypto from the virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF pass virtchnl_ipsec_cap to PF
+ * and PF return capability of ipsec from virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF send this SA configuration to PF using virtchnl;
+ * PF create SA as configuration and PF driver will return
+ * a unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF send configuration of index of SA to PF
+ * PF will update SA according to configuration
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF send configuration of index of SA to PF
+ * PF will destroy SA according to configuration
+ * flag bitmap indicate all SA or just selected SA will
+ * be destroyed
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF send this SA configuration to PF using virtchnl;
+ * PF read SA and will return configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logical separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread
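
As a rough illustration of how the message layout and the per-opcode length
helper above fit together, the sketch below builds an SA-create request; the
helper name, the use of calloc() and the req_id value are illustrative and
not part of the patch:

#include <stdlib.h>
#include <string.h>

/* Illustrative only: size and populate an inline_ipsec_msg carrying a
 * virtchnl_ipsec_sa_cfg payload, using virtchnl_inline_ipsec_val_msg_len()
 * to compute the buffer size the PF expects for INLINE_IPSEC_OP_SA_CREATE.
 */
static struct inline_ipsec_msg *
example_build_sa_create(const struct virtchnl_ipsec_sa_cfg *sa, u16 *msg_len)
{
	u16 len = virtchnl_inline_ipsec_val_msg_len(INLINE_IPSEC_OP_SA_CREATE);
	struct inline_ipsec_msg *msg = calloc(1, len);

	if (msg == NULL)
		return NULL;

	msg->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
	msg->req_id = 0;	/* caller-chosen request identifier */
	memcpy(msg->ipsec_data.sa_cfg, sa, sizeof(*sa));

	*msg_len = len;		/* length expected by the PF for this opcode */
	return msg;
}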

* [dpdk-dev] [PATCH v13 2/7] net/iavf: rework tx path
  2021-10-28 15:52 ` [dpdk-dev] [PATCH v13 " Radu Nicolau
  2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 1/7] common/iavf: " Radu Nicolau
@ 2021-10-28 15:52   ` Radu Nicolau
  2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 15:52 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Rework the TX path and TX descriptor usage in order to
allow for better use of offload flags and to facilitate enabling of
the inline crypto offload feature.
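
For reference, the reworked path derives the descriptor command and offset
fields from the standard mbuf offload metadata; a minimal sketch of what an
application sets to request checksum offload plus TSO on an IPv4/TCP packet
(the MSS value is illustrative):

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>

/* Illustrative only: offload metadata consumed by the reworked TX path
 * when it builds the context and data descriptors. */
static void
example_request_tso(struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);
	m->tso_segsz = 1448;	/* example MSS */
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
		       RTE_MBUF_F_TX_TCP_SEG;
}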

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c         | 544 ++++++++++++++++-----------
 drivers/net/iavf/iavf_rxtx.h         | 117 +++++-
 drivers/net/iavf/iavf_rxtx_vec_sse.c |  10 +-
 3 files changed, 437 insertions(+), 234 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 52d919ca1b..9663e6514c 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1054,27 +1054,34 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 
 static inline void
 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
-			  volatile union iavf_rx_flex_desc *rxdp,
-			  uint8_t rx_flags)
+			  volatile union iavf_rx_flex_desc *rxdp)
 {
-	uint16_t vlan_tci = 0;
-
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
-	    rte_le_to_cpu_64(rxdp->wb.status_error0) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+		(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+		mb->ol_flags |= RTE_MBUF_F_RX_VLAN |
+				RTE_MBUF_F_RX_VLAN_STRIPPED;
+		mb->vlan_tci =
+			rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	} else {
+		mb->vlan_tci = 0;
+	}
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
-	    rte_le_to_cpu_16(rxdp->wb.status_error1) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
-#endif
-
-	if (vlan_tci) {
-		mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
-		mb->vlan_tci = vlan_tci;
+	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
+		mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED |
+				RTE_MBUF_F_RX_QINQ |
+				RTE_MBUF_F_RX_VLAN_STRIPPED |
+				RTE_MBUF_F_RX_VLAN;
+		mb->vlan_tci_outer = mb->vlan_tci;
+		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
+	} else {
+		mb->vlan_tci_outer = 0;
 	}
+#endif
 }
 
 /* Translate the rx descriptor status and error fields to pkt flags */
@@ -1394,7 +1401,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->ol_flags = 0;
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1536,7 +1543,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = 0;
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1774,7 +1781,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
-			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
+			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2068,190 +2075,305 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 	return 0;
 }
 
-/* Check if the context descriptor is needed for TX offloading */
+
+
+static inline void
+iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
+{
+	uint64_t cmd = 0;
+
+	/* TSO enabled */
+	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
+		cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;
+
+	/* Time Sync - Currently not supported */
+
+	/* Outer L2 TAG 2 Insertion - Currently not supported */
+	/* Inner L2 TAG 2 Insertion - Currently not supported */
+
+	*field |= cmd;
+}
+
+static inline void
+iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
+		const struct rte_mbuf *m)
+{
+	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
+	uint64_t eip_len = 0;
+	uint64_t eip_noinc = 0;
+	/* Default - IP_ID is increment in each segment of LSO */
+
+	switch (m->ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 |
+			RTE_MBUF_F_TX_OUTER_IPV6 |
+			RTE_MBUF_F_TX_OUTER_IP_CKSUM)) {
+	case RTE_MBUF_F_TX_OUTER_IPV4:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case RTE_MBUF_F_TX_OUTER_IPV6:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	}
+
+	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
+		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
+		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
+}
+
 static inline uint16_t
-iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
+iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
+	struct rte_mbuf *m)
 {
-	if (flags & RTE_MBUF_F_TX_TCP_SEG)
-		return 1;
-	if (flags & RTE_MBUF_F_TX_VLAN &&
-	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		return 1;
-	return 0;
+	uint64_t segmentation_field = 0;
+	uint64_t total_length = 0;
+
+	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+		total_length -= m->outer_l3_len;
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+	if (!m->l4_len || !m->tso_segsz)
+		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
+			 m->l4_len, m->tso_segsz);
+	if (m->tso_segsz < 88)
+		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
+			m->tso_segsz, 88);
+#endif
+	segmentation_field =
+		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
+				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
+		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
+				IAVF_TXD_CTX_QW1_MSS_MASK);
+
+	*field |= segmentation_field;
+
+	return total_length;
+}
+
+
+struct iavf_tx_context_desc_qws {
+	__le64 qw0;
+	__le64 qw1;
+};
+
+static inline void
+iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
+	struct rte_mbuf *m, uint16_t *tlen)
+{
+	volatile struct iavf_tx_context_desc_qws *desc_qws =
+			(volatile struct iavf_tx_context_desc_qws *)desc;
+	/* fill descriptor type field */
+	desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
+
+	/* fill command field */
+	iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m);
+
+	/* fill segmentation field */
+	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
+		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
+				m);
+	}
+
+	/* fill tunnelling field */
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
+	else
+		desc_qws->qw0 = 0;
+
+	desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
+	desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);
 }
 
+
 static inline void
-iavf_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union iavf_tx_offload tx_offload)
+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
+		struct rte_mbuf *m)
 {
+	uint64_t command = 0;
+	uint64_t offset = 0;
+	uint64_t l2tag1 = 0;
+
+	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
+
+	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
+
+	/* Descriptor based VLAN insertion */
+	if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
+		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
+		l2tag1 |= m->vlan_tci;
+	}
+
 	/* Set MACLEN */
-	*td_offset |= (tx_offload.l2_len >> 1) <<
-		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	}
-
-	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
+	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L3 checksum offloading inner */
+	if (m->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	}
+
+	if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (m->l4_len >> 2) <<
 			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		return;
 	}
 
 	/* Enable L4 checksum offloads */
-	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+	switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
 	case RTE_MBUF_F_TX_TCP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case RTE_MBUF_F_TX_SCTP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case RTE_MBUF_F_TX_UDP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	}
+
+	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
+		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
+		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
+		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
+		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
-/* set TSO context descriptor
- * support IP -> L4 and IP -> IP -> L4
- */
-static inline uint64_t
-iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
+static inline void
+iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t value)
 {
-	uint64_t ctx_desc = 0;
-	uint32_t cd_cmd, hdr_len, cd_tso_len;
-
-	if (!tx_offload.l4_len) {
-		PMD_TX_LOG(DEBUG, "L4 length set to 0");
-		return ctx_desc;
+	*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 	}
 
-	hdr_len = tx_offload.l2_len +
-		  tx_offload.l3_len +
-		  tx_offload.l4_len;
+static inline void
+iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
+	struct rte_mbuf *m, uint64_t desc_template,
+	uint16_t tlen, uint16_t ipseclen)
+{
+	uint32_t hdrlen = m->l2_len;
+	uint32_t bufsz = 0;
+
+	/* fill data descriptor qw1 from template */
+	desc->cmd_type_offset_bsz = desc_template;
+
+	/* set data buffer address */
+	desc->buffer_addr = rte_mbuf_data_iova(m);
+
+	/* calculate data buffer size less set header lengths */
+	if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
+			(m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
+					RTE_MBUF_F_TX_UDP_SEG))) {
+		hdrlen += m->outer_l3_len;
+		if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
+			hdrlen += m->l3_len + m->l4_len;
+		else
+			hdrlen += m->l3_len;
+		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
+			hdrlen += ipseclen;
+		bufsz = hdrlen + tlen;
+	} else {
+		bufsz = m->data_len;
+	}
 
-	cd_cmd = IAVF_TX_CTX_DESC_TSO;
-	cd_tso_len = mbuf->pkt_len - hdr_len;
-	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
-		     ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-		     ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+	/* set data buffer size */
+	desc->cmd_type_offset_bsz |=
+		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 
-	return ctx_desc;
+	desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
+	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
 }
 
-/* Construct the tx flags */
-static inline uint64_t
-iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
-	       uint32_t td_tag)
-{
-	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
-				((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
-				((uint64_t)td_offset <<
-				 IAVF_TXD_QW1_OFFSET_SHIFT) |
-				((uint64_t)size  <<
-				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
-				((uint64_t)td_tag  <<
-				 IAVF_TXD_QW1_L2TAG1_SHIFT));
-}
 
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	volatile struct iavf_tx_desc *txd;
-	volatile struct iavf_tx_desc *txr;
-	struct iavf_tx_queue *txq;
-	struct iavf_tx_entry *sw_ring;
+	struct iavf_tx_queue *txq = tx_queue;
+	volatile struct iavf_tx_desc *txr = txq->tx_ring;
+	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
-	struct rte_mbuf *tx_pkt;
-	struct rte_mbuf *m_seg;
-	uint16_t tx_id;
-	uint16_t nb_tx;
-	uint32_t td_cmd;
-	uint32_t td_offset;
-	uint32_t td_tag;
-	uint64_t ol_flags;
-	uint16_t nb_used;
-	uint16_t nb_ctx;
-	uint16_t tx_last;
-	uint16_t slen;
-	uint64_t buf_dma_addr;
-	uint16_t cd_l2tag2 = 0;
-	union iavf_tx_offload tx_offload = {0};
-
-	txq = tx_queue;
-	sw_ring = txq->sw_ring;
-	txr = txq->tx_ring;
-	tx_id = txq->tx_tail;
-	txe = &sw_ring[tx_id];
+	struct rte_mbuf *mb, *mb_seg;
+	uint16_t desc_idx, desc_idx_last;
+	uint16_t idx;
+
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_free < txq->free_thresh)
-		(void)iavf_xmit_cleanup(txq);
+		iavf_xmit_cleanup(txq);
+
+	desc_idx = txq->tx_tail;
+	txe = &txe_ring[desc_idx];
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+		iavf_dump_tx_entry_ring(txq);
+		iavf_dump_tx_desc_ring(txq);
+#endif
+
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		volatile struct iavf_tx_desc *ddesc;
+		uint16_t nb_desc_ctx;
+		uint16_t nb_desc_data, nb_desc_required;
+		uint16_t tlen = 0, ipseclen = 0;
+		uint64_t ddesc_template = 0;
+		uint64_t ddesc_cmd = 0;
 
-	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-		td_cmd = 0;
-		td_tag = 0;
-		td_offset = 0;
+		mb = tx_pkts[idx];
 
-		tx_pkt = *tx_pkts++;
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
-		ol_flags = tx_pkt->ol_flags;
-		tx_offload.l2_len = tx_pkt->l2_len;
-		tx_offload.l3_len = tx_pkt->l3_len;
-		tx_offload.l4_len = tx_pkt->l4_len;
-		tx_offload.tso_segsz = tx_pkt->tso_segsz;
-		/* Calculate the number of context descriptors needed. */
-		nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
+		nb_desc_data = mb->nb_segs;
+		nb_desc_ctx = !!(mb->ol_flags &
+			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
+					RTE_MBUF_F_TX_TUNNEL_MASK));
 
-		/* The number of descriptors that must be allocated for
+		/**
+		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
-		 * packet plus 1 context descriptor if needed.
+		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
-		tx_last = (uint16_t)(tx_id + nb_used - 1);
+		nb_desc_required = nb_desc_data + nb_desc_ctx;
 
-		/* Circular ring */
-		if (tx_last >= txq->nb_tx_desc)
-			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
-		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
-			   " tx_first=%u tx_last=%u",
-			   txq->port_id, txq->queue_id, tx_id, tx_last);
+		/* wrap descriptor ring */
+		if (desc_idx_last >= txq->nb_tx_desc)
+			desc_idx_last =
+				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
 
-		if (nb_used > txq->nb_free) {
+		PMD_TX_LOG(DEBUG,
+			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
+			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
+
+		if (nb_desc_required > txq->nb_free) {
 			if (iavf_xmit_cleanup(txq)) {
-				if (nb_tx == 0)
+				if (idx == 0)
 					return 0;
 				goto end_of_tx;
 			}
-			if (unlikely(nb_used > txq->rs_thresh)) {
-				while (nb_used > txq->nb_free) {
+			if (unlikely(nb_desc_required > txq->rs_thresh)) {
+				while (nb_desc_required > txq->nb_free) {
 					if (iavf_xmit_cleanup(txq)) {
-						if (nb_tx == 0)
+						if (idx == 0)
 							return 0;
 						goto end_of_tx;
 					}
@@ -2259,122 +2381,94 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 		}
 
-		/* Descriptor based VLAN insertion */
-		if (ol_flags & RTE_MBUF_F_TX_VLAN &&
-		    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
-			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
-			td_tag = tx_pkt->vlan_tci;
-		}
-
-		/* According to datasheet, the bit2 is reserved and must be
-		 * set to 1.
-		 */
-		td_cmd |= 0x04;
-
-		/* Enable checksum offloading */
-		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
-			iavf_txd_enable_checksum(ol_flags, &td_cmd,
-						&td_offset, tx_offload);
+		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
 
-		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
-			uint64_t cd_type_cmd_tso_mss =
-				IAVF_TX_DESC_DTYPE_CONTEXT;
-			volatile struct iavf_tx_context_desc *ctx_txd =
+		if (nb_desc_ctx) {
+			volatile struct iavf_tx_context_desc *ctx_desc =
 				(volatile struct iavf_tx_context_desc *)
-							&txr[tx_id];
+					&txr[desc_idx];
 
 			/* clear QW0 or the previous writeback value
 			 * may impact next write
 			 */
-			*(volatile uint64_t *)ctx_txd = 0;
+			*(volatile uint64_t *)ctx_desc = 0;
 
-			txn = &sw_ring[txe->next_id];
+			txn = &txe_ring[txe->next_id];
 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
 			if (txe->mbuf) {
 				rte_pktmbuf_free_seg(txe->mbuf);
 				txe->mbuf = NULL;
 			}
 
-			/* TSO enabled */
-			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
-				cd_type_cmd_tso_mss |=
-					iavf_set_tso_ctx(tx_pkt, tx_offload);
+			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
-			if (ol_flags & RTE_MBUF_F_TX_VLAN &&
-			    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
-				cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
-					<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-				cd_l2tag2 = tx_pkt->vlan_tci;
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
 			}
 
-			ctx_txd->type_cmd_tso_mss =
-				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
-			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
 
-			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
-		}
 
-		m_seg = tx_pkt;
+		mb_seg = mb;
+
 		do {
-			txd = &txr[tx_id];
-			txn = &sw_ring[txe->next_id];
+			ddesc = (volatile struct iavf_tx_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
 			if (txe->mbuf)
 				rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = m_seg;
-
-			/* Setup TX Descriptor */
-			slen = m_seg->data_len;
-			buf_dma_addr = rte_mbuf_data_iova(m_seg);
-			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
-			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
-								  td_offset,
-								  slen,
-								  td_tag);
-
-			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
+
+			txe->mbuf = mb_seg;
+			iavf_fill_data_desc(ddesc, mb_seg,
+					ddesc_template, tlen, ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->next;
-		} while (m_seg);
+			mb_seg = mb_seg->next;
+		} while (mb_seg);
 
 		/* The last packet data descriptor needs End Of Packet (EOP) */
-		td_cmd |= IAVF_TX_DESC_CMD_EOP;
-		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
-		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
+
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
 
 		if (txq->nb_used >= txq->rs_thresh) {
 			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
 				   "%4u (port=%d queue=%d)",
-				   tx_last, txq->port_id, txq->queue_id);
+				   desc_idx_last, txq->port_id, txq->queue_id);
 
-			td_cmd |= IAVF_TX_DESC_CMD_RS;
+			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
 
 			/* Update txq RS bit counters */
 			txq->nb_used = 0;
 		}
 
-		txd->cmd_type_offset_bsz |=
-			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
-					 IAVF_TXD_QW1_CMD_SHIFT);
-		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
+				IAVF_TXD_DATA_QW1_CMD_SHIFT);
+
+		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
 	}
 
 end_of_tx:
 	rte_wmb();
 
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   txq->port_id, txq->queue_id, tx_id, nb_tx);
+		   txq->port_id, txq->queue_id, desc_idx, idx);
 
-	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
-	txq->tx_tail = tx_id;
+	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
+	txq->tx_tail = desc_idx;
 
-	return nb_tx;
+	return idx;
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 84351011f1..1da1278452 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -403,6 +403,112 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+
+#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
+#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
+#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
+#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
+	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
+	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT		(48)
+#define IAVF_TXD_DATA_QW1_L2TAG1_MASK		\
+	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
+	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
+	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT		(30)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK		\
+	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK		\
+	(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT		(50)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK		\
+	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT		(0)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_external_ip_type {
+	IAVF_TX_CTX_DESC_EIPT_NONE,
+	IAVF_TX_CTX_DESC_EIPT_IPV6,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM			(32)
+#define IAVF_TXD_CTX_QW0_L2TAG2_MASK			(0xFFFFUL)
+
+
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)
+
+/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
+#define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
+
+
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
@@ -553,9 +659,10 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	const volatile struct iavf_tx_desc *tx_desc = desc;
 	enum iavf_tx_desc_dtype_value type;
 
-	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
-		tx_desc->cmd_type_offset_bsz &
-		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+
+	type = (enum iavf_tx_desc_dtype_value)
+		rte_le_to_cpu_64(tx_desc->cmd_type_offset_bsz &
+			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
 	switch (type) {
 	case IAVF_TX_DESC_DTYPE_DATA:
 		name = "Tx_data_desc";
@@ -569,8 +676,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	}
 
 	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
-	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
-	       tx_desc->cmd_type_offset_bsz);
+		txq->queue_id, name, tx_id, tx_desc->buffer_addr,
+		tx_desc->cmd_type_offset_bsz);
 }
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index d4f4d705b7..1bac59bf0e 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -363,10 +363,12 @@ static inline void
 flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
 		     const uint32_t *type_table)
 {
-	const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+	const __m128i ptype_mask =
+			_mm_set_epi16(IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+				IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+				IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+				IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
+
 	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
 	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
 	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread
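
For reference, the IAVF_TXD_DATA_QW1_* macros introduced above describe how
the second quad-word of a data descriptor is laid out; a small worked example
of packing the fields (the field values would come from the fill functions in
the patch, the function name here is made up):

/* Illustrative only: bits 0..3 dtype, 4..13 command, 16..33 offsets,
 * 34..47 buffer size, 48..63 L2 tag 1. */
static inline uint64_t
example_build_data_qw1(uint64_t cmd, uint64_t offset, uint64_t bufsz,
		       uint64_t l2tag1)
{
	return IAVF_TX_DESC_DTYPE_DATA |
	       ((cmd << IAVF_TXD_DATA_QW1_CMD_SHIFT) &
			IAVF_TXD_DATA_QW1_CMD_MASK) |
	       ((offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
			IAVF_TXD_DATA_QW1_OFFSET_MASK) |
	       ((bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK) |
	       (l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT);
}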

* [dpdk-dev] [PATCH v13 3/7] net/iavf: add support for asynchronous virt channel messages
  2021-10-28 15:52 ` [dpdk-dev] [PATCH v13 " Radu Nicolau
  2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 1/7] common/iavf: " Radu Nicolau
  2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 2/7] net/iavf: rework tx path Radu Nicolau
@ 2021-10-28 15:52   ` Radu Nicolau
  2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 15:52 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for asynchronous virtual channel messages, specifically for
inline IPsec messages.
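
The PF answers an inline IPsec request twice: an immediate acknowledgement of
the admin queue message and, later, the actual result, so the pending command
state has to survive the first reply. A small self-contained model of the
completion accounting used here (names simplified; the real code lives in
iavf.h and iavf_vchnl.c):

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative only: an asynchronous request arms the counter with 2, a
 * synchronous one with 1; each matching reply decrements it and the waiter
 * is released only when the last expected reply has arrived. */
static _Atomic unsigned int pend_cmd_count;

static void
example_issue_cmd(bool async)
{
	atomic_store_explicit(&pend_cmd_count, async ? 2u : 1u,
			      memory_order_relaxed);
}

static bool
example_on_reply(void)
{
	/* true when the command can be marked complete */
	return atomic_fetch_sub_explicit(&pend_cmd_count, 1,
					 memory_order_relaxed) == 1;
}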

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h       |  25 +++++-
 drivers/net/iavf/iavf_vchnl.c | 140 +++++++++++++++++++++-------------
 2 files changed, 110 insertions(+), 55 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 12f541f539..8bd2b830ee 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -193,6 +193,7 @@ struct iavf_info {
 	uint64_t supported_rxdid;
 	uint8_t *proto_xtr; /* proto xtr type for all queues */
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+	uint32_t pend_cmd_count;
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
@@ -339,15 +340,35 @@ _clear_cmd(struct iavf_info *vf)
 static inline int
 _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 {
-	int ret = rte_atomic32_cmpset((volatile uint32_t *)&vf->pend_cmd,
-		VIRTCHNL_OP_UNKNOWN, ops);
+	enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
+	int ret = __atomic_compare_exchange((volatile uint32_t *)&vf->pend_cmd,
+			&op_unk, &ops,
+			0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
 
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
+	__atomic_store_n(&vf->pend_cmd_count, 1, __ATOMIC_RELAXED);
+
 	return !ret;
 }
 
+/* Check there is pending cmd in execution. If none, set new command. */
+static inline int
+_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+	enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
+	int ret = __atomic_compare_exchange((volatile uint32_t *)&vf->pend_cmd,
+			&op_unk, &ops,
+			0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+
+	if (!ret)
+		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+	__atomic_store_n(&vf->pend_cmd_count, 2, __ATOMIC_RELAXED);
+
+	return !ret;
+}
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index bb65dbf04f..df15e589d4 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -24,8 +24,8 @@
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
-#define MAX_TRY_TIMES 200
-#define ASQ_DELAY_MS  10
+#define MAX_TRY_TIMES 2000
+#define ASQ_DELAY_MS  1
 
 static uint32_t
 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
@@ -143,7 +143,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 }
 
 static int
-iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args,
+	int async)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
@@ -155,8 +156,14 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (vf->vf_reset)
 		return -EIO;
 
-	if (_atomic_set_cmd(vf, args->ops))
-		return -1;
+
+	if (async) {
+		if (_atomic_set_async_response_cmd(vf, args->ops))
+			return -1;
+	} else {
+		if (_atomic_set_cmd(vf, args->ops))
+			return -1;
+	}
 
 	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
@@ -252,9 +259,11 @@ static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
@@ -330,18 +339,42 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
 		case iavf_aqc_opc_send_msg_to_vf:
 			if (msg_opc == VIRTCHNL_OP_EVENT) {
 				iavf_handle_pf_event_msg(dev, info.msg_buf,
-							info.msg_len);
+						info.msg_len);
 			} else {
+				/* check for inline IPsec events */
+				struct inline_ipsec_msg *imsg =
+					(struct inline_ipsec_msg *)info.msg_buf;
+				struct rte_eth_event_ipsec_desc desc;
+				if (msg_opc ==
+					VIRTCHNL_OP_INLINE_IPSEC_CRYPTO &&
+					imsg->ipsec_opcode ==
+						INLINE_IPSEC_OP_EVENT) {
+					struct virtchnl_ipsec_event *ev =
+							imsg->ipsec_data.event;
+					desc.subtype =
+						RTE_ETH_EVENT_IPSEC_UNKNOWN;
+					desc.metadata = ev->ipsec_event_data;
+					rte_eth_dev_callback_process(dev,
+							RTE_ETH_EVENT_IPSEC,
+							&desc);
+					return;
+				}
+
 				/* read message and it's expected one */
-				if (msg_opc == vf->pend_cmd)
-					_notify_cmd(vf, msg_ret);
-				else
-					PMD_DRV_LOG(ERR, "command mismatch,"
-						    "expect %u, get %u",
-						    vf->pend_cmd, msg_opc);
+				if (msg_opc == vf->pend_cmd) {
+					uint32_t cmd_count =
+					__atomic_sub_fetch(&vf->pend_cmd_count,
+							1, __ATOMIC_RELAXED);
+					if (cmd_count == 0)
+						_notify_cmd(vf, msg_ret);
+				} else {
+					PMD_DRV_LOG(ERR,
+					"command mismatch, expect %u, get %u",
+						vf->pend_cmd, msg_opc);
+				}
 				PMD_DRV_LOG(DEBUG,
-					    "adminq response is received,"
-					    " opcode = %d", msg_opc);
+				"adminq response is received, opcode = %d",
+						msg_opc);
 			}
 			break;
 		default:
@@ -365,7 +398,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_ENABLE_VLAN_STRIPPING");
@@ -386,7 +419,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_DISABLE_VLAN_STRIPPING");
@@ -415,7 +448,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
 		return err;
@@ -468,12 +501,13 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
-		VIRTCHNL_VF_OFFLOAD_QOS;
+		VIRTCHNL_VF_OFFLOAD_QOS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -518,7 +552,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
@@ -562,7 +596,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_strip);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
@@ -602,7 +636,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_insert);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
@@ -645,7 +679,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(vlan_filter);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");
@@ -666,7 +700,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
@@ -697,7 +731,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES");
@@ -725,7 +759,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES");
@@ -758,7 +792,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
@@ -800,7 +834,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
@@ -844,7 +878,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
@@ -890,7 +924,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
@@ -922,7 +956,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
@@ -954,7 +988,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
@@ -1046,7 +1080,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
@@ -1087,7 +1121,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
 
@@ -1128,7 +1162,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
 
@@ -1188,7 +1222,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		args.in_args_size = len;
 		args.out_buffer = vf->aq_resp;
 		args.out_size = IAVF_AQ_BUF_SZ;
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		if (err)
 			PMD_DRV_LOG(ERR, "fail to execute command %s",
 				    add ? "OP_ADD_ETHER_ADDRESS" :
@@ -1215,7 +1249,7 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		*pstats = NULL;
@@ -1250,7 +1284,7 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1290,7 +1324,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
@@ -1317,7 +1351,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
@@ -1344,7 +1378,7 @@ iavf_fdir_add(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
 		return err;
@@ -1404,7 +1438,7 @@ iavf_fdir_del(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
 		return err;
@@ -1451,7 +1485,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
 		return err;
@@ -1492,7 +1526,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of %s",
@@ -1515,7 +1549,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
@@ -1541,7 +1575,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_SET_RSS_HENA");
@@ -1562,7 +1596,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1595,7 +1629,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_TC_MAP");
@@ -1640,7 +1674,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 		i * sizeof(struct virtchnl_ether_addr);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
@@ -1686,11 +1720,11 @@ iavf_request_queues(struct rte_eth_dev *dev, uint16_t num)
 		 * before iavf_read_msg_from_pf.
 		 */
 		rte_intr_disable(pci_dev->intr_handle);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_intr_enable(pci_dev->intr_handle);
 	} else {
 		rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
 				  iavf_dev_alarm_handler, dev);
 	}
@@ -1729,7 +1763,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
 		return err;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v13 4/7] net/iavf: add iAVF IPsec inline crypto support
  2021-10-28 15:52 ` [dpdk-dev] [PATCH v13 " Radu Nicolau
                     ` (2 preceding siblings ...)
  2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
@ 2021-10-28 15:52   ` Radu Nicolau
  2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 15:52 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Ray Kinsella
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload for
ESP over UDP, and in conjunction with TSO for UDP and TCP flows.
Implement support for rte_security packet metadata.

Add definitions for IPsec descriptors and extend offload support
in the data and context descriptors.

Add support to the virtual channel mailbox for IPsec Crypto request
operations. IPsec Crypto requests receive an initial acknowledgment
of receipt from the physical function driver and then an
asynchronous response with success/failure of the request, including
any response data.

Add enhanced descriptor debugging

Refactor the scalar TX burst function to support integration of the offload.
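
From the application side the offload added here is driven through the
generic rte_security API; a minimal transmit-side sketch, assuming an
inline-crypto security session has already been created for the SA (session
setup and the matching rte_flow rules are omitted, the function name is
illustrative):

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

/* Illustrative only: attach the SA to an outbound packet and request the
 * inline security offload; the PMD then fills the IPsec descriptor fields
 * when the packet is transmitted. */
static void
example_prepare_inline_ipsec(uint16_t port_id,
			     struct rte_security_session *sess,
			     struct rte_mbuf *m)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);

	rte_security_set_pkt_metadata(ctx, sess, m, NULL);
	m->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
}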

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h                       |   10 +
 drivers/net/iavf/iavf_ethdev.c                |   41 +-
 drivers/net/iavf/iavf_generic_flow.c          |   15 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1894 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  202 +-
 drivers/net/iavf/iavf_rxtx.h                  |  107 +-
 drivers/net/iavf/iavf_vchnl.c                 |   29 +
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 13 files changed, 2823 insertions(+), 27 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 8bd2b830ee..bac72590bc 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -221,6 +221,7 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+	struct iavf_parser_list ipsec_crypto_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
@@ -245,6 +246,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_IPV6_FLOW,
 	IAVF_PROTO_XTR_TCP,
 	IAVF_PROTO_XTR_IP_OFFSET,
+	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
 	IAVF_PROTO_XTR_MAX,
 };
 
@@ -256,11 +258,14 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 };
 
+struct iavf_security_ctx;
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
 	struct rte_eth_dev_data *dev_data;
 	struct iavf_info vf;
+	struct iavf_security_ctx *security_ctx;
 
 	bool rx_bulk_alloc_allowed;
 	/* For vector PMD */
@@ -279,6 +284,8 @@ struct iavf_adapter {
 	(&((struct iavf_adapter *)adapter)->vf)
 #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct iavf_adapter *)adapter)->hw)
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+	(((struct iavf_adapter *)adapter)->security_ctx)
 
 /* IAVF_VSI_TO */
 #define IAVF_VSI_TO_HW(vsi) \
@@ -426,5 +433,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			uint16_t size);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index f892306f18..dba505494f 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -30,6 +30,7 @@
 #include "iavf_rxtx.h"
 #include "iavf_generic_flow.h"
 #include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
@@ -71,6 +72,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
 	[IAVF_PROTO_XTR_IP_OFFSET] = {
 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
 		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+		.param = {
+		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+		.ol_flag =
+			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
 };
 
 static int iavf_dev_configure(struct rte_eth_dev *dev);
@@ -922,6 +928,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	/* free the iAVF security device context and all related resources */
+	iavf_security_ctx_destroy(adapter);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -931,7 +940,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
@@ -973,6 +984,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+	}
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -1718,6 +1734,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
 		{ "tcp",       IAVF_PROTO_XTR_TCP       },
 		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
 	};
 	uint32_t i;
 
@@ -1726,8 +1743,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 			return xtr_type_map[i].type;
 	}
 
-	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
-		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+			"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
 
 	return -1;
 }
@@ -2375,6 +2392,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		goto flow_init_err;
 	}
 
+	/** Check if the IPsec Crypto offload is supported and create
+	 *  security_ctx if it is.
+	 */
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		/* Initialize security_ctx only for the primary process */
+		ret = iavf_security_ctx_create(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+			return ret;
+		}
+
+		ret = iavf_security_init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to initialized ipsec crypto resources");
+			return ret;
+		}
+	}
+
 	iavf_default_rss_disable(adapter);
 
 	return 0;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index 364904fa02..2befa125ac 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1766,6 +1766,7 @@ iavf_flow_init(struct iavf_adapter *ad)
 	TAILQ_INIT(&vf->flow_list);
 	TAILQ_INIT(&vf->rss_parser_list);
 	TAILQ_INIT(&vf->dist_parser_list);
+	TAILQ_INIT(&vf->ipsec_crypto_parser_list);
 	rte_spinlock_init(&vf->flow_ops_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
@@ -1840,6 +1841,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
+		list = &vf->ipsec_crypto_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
@@ -2149,6 +2153,13 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 
 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
 				    actions, error);
+	if (*engine)
+		return 0;
+
+	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
+			pattern, actions, error);
+	if (*engine)
+		return 0;
 
 	if (!*engine) {
 		rte_flow_error_set(error, EINVAL,
@@ -2195,6 +2206,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	/* Special case for inline crypto egress flows */
+	if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY)
+		goto free_flow;
+
 	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
 			&engine, iavf_parse_engine_create, error);
 	if (ret < 0) {
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index f2b54e1944..3681a96b31 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -464,6 +464,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 /* engine types. */
 enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_NONE = 0,
+	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
 	IAVF_FLOW_ENGINE_MAX,
@@ -477,6 +478,7 @@ enum iavf_flow_engine_type {
  */
 enum iavf_flow_classification_stage {
 	IAVF_FLOW_STAGE_NONE = 0,
+	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
 	IAVF_FLOW_STAGE_RSS,
 	IAVF_FLOW_STAGE_DISTRIBUTOR,
 	IAVF_FLOW_STAGE_MAX,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
new file mode 100644
index 0000000000..633fedf860
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -0,0 +1,1894 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+#include <rte_security_driver.h>
+#include <rte_security.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_log.h"
+#include "iavf_generic_flow.h"
+
+#include "iavf_ipsec_crypto.h"
+#include "iavf_ipsec_crypto_capabilities.h"
+
+/**
+ * iAVF IPsec Crypto Security Context
+ */
+struct iavf_security_ctx {
+	struct iavf_adapter *adapter;
+	int pkt_md_offset;
+	struct rte_cryptodev_capabilities *crypto_capabilities;
+};
+
+/**
+ * iAVF IPsec Crypto Security Session Parameters
+ */
+struct iavf_security_session {
+	struct iavf_adapter *adapter;
+
+	enum rte_security_ipsec_sa_mode mode;
+	enum rte_security_ipsec_tunnel_type type;
+	enum rte_security_ipsec_sa_direction direction;
+
+	struct {
+		uint32_t spi; /* Security Parameter Index */
+		uint32_t hw_idx; /* SA Index in hardware table */
+	} sa;
+
+	struct {
+		uint8_t enabled :1;
+		union {
+			uint64_t value;
+			struct {
+				uint32_t low;
+				uint32_t hi;
+			};
+		};
+	} esn;
+
+	struct {
+		uint8_t enabled :1;
+	} udp_encap;
+
+	size_t iv_sz;
+	size_t icv_sz;
+	size_t block_sz;
+
+	struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
+};
+/**
+ *  IV Length field in IPsec Tx Desc uses the following encoding:
+ *
+ *  0B - 0
+ *  4B - 1
+ *  8B - 2
+ *  16B - 3
+ *
+ * but we also need the raw IV length for TSO to correctly calculate the total
+ * header length, so we place it in the upper 6 bits here for easier retrieval.
+ */
+static inline uint8_t
+calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
+{
+	uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
+
+	switch (iv_sz) {
+	case 4:
+		iv_length = IAVF_IPSEC_IV_LEN_DW;
+		break;
+	case 8:
+		iv_length = IAVF_IPSEC_IV_LEN_DDW;
+		break;
+	case 16:
+		iv_length = IAVF_IPSEC_IV_LEN_QDW;
+		break;
+	}
+
+	return (iv_sz << 2) | iv_length;
+}
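+
+/*
+ * Illustrative example: for a 16B IV, calc_ipsec_desc_iv_len_field(16)
+ * returns (16 << 2) | IAVF_IPSEC_IV_LEN_QDW = 0x43, so the TSO path can
+ * recover the raw IV size with (value >> 2) while the low two bits still
+ * carry the descriptor encoding.
+ */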
+
+static unsigned int
+iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
+{
+	return sizeof(struct iavf_security_session);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_capability(struct iavf_security_ctx *iavf_sctx,
+	uint32_t algo, uint32_t type)
+{
+	const struct rte_cryptodev_capabilities *capability;
+	int i = 0;
+
+	capability = &iavf_sctx->crypto_capabilities[i];
+
+	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+			capability->sym.xform_type == type &&
+			capability->sym.cipher.algo == algo)
+			return &capability->sym;
+		/** try next capability */
+		capability = &iavf_sctx->crypto_capabilities[++i];
+	}
+
+	return NULL;
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_auth_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_aead_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
+}
+
+static uint16_t
+get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->aead.block_size;
+}
+
+static uint16_t
+get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->auth.block_size;
+}
+
+static uint8_t
+calc_context_desc_cipherblock_sz(size_t len)
+{
+	switch (len) {
+	case 8:
+		return 0x2;
+	case 16:
+		return 0x3;
+	default:
+		return 0x0;
+	}
+}
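+
+/*
+ * Example (illustrative): an 8B cipher block (e.g. 3DES-CBC) encodes as 0x2
+ * and a 16B block (e.g. AES-CBC) as 0x3 in the context descriptor; any other
+ * block size falls back to 0x0.
+ */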
+
+static int
+valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
+{
+	if (len < min || len > max)
+		return false;
+
+	if (increment == 0)
+		return true;
+
+	if ((len - min) % increment)
+		return false;
+
+	/* make sure it fits in the key array */
+	if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
+		return false;
+
+	return true;
+}
+
+static int
+valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_auth_xform *auth)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, auth->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(auth->key.length,
+		capability->auth.key_size.min,
+		capability->auth.key_size.max,
+		capability->auth.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_cipher_xform *cipher)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, cipher->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(cipher->key.length,
+		capability->cipher.key_size.min,
+		capability->cipher.key_size.max,
+		capability->cipher.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_aead_xform *aead)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, aead->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(aead->key.length,
+		capability->aead.key_size.min,
+		capability->aead.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
+	struct rte_security_session_conf *conf)
+{
+	/** validate security action/protocol selection */
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
+		PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate IPsec protocol selection */
+	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate selected options */
+	if (conf->ipsec.options.copy_dscp ||
+		conf->ipsec.options.copy_flabel ||
+		conf->ipsec.options.copy_df ||
+		conf->ipsec.options.dec_ttl ||
+		conf->ipsec.options.ecn ||
+		conf->ipsec.options.stats) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+		return -EINVAL;
+	}
+
+	/**
+	 * Validate crypto xforms parameters.
+	 *
+	 * AEAD transforms can be used for either inbound/outbound IPsec SAs,
+	 * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
+	 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
+	 */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_auth_xform(iavf_sctx,
+				&conf->crypto_xform->next->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->next->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_aead_xform *aead, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AEAD;
+
+	switch (aead->algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		cfg->algo_type = VIRTCHNL_AES_CCM; break;
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		cfg->algo_type = VIRTCHNL_AES_GCM; break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
+		break;
+	}
+
+	cfg->key_len = aead->key.length;
+	cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt len */
+	cfg->digest_len = aead->digest_length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, aead->key.data, cfg->key_len);
+}
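+
+/*
+ * Note: the AEAD xform's iv.length covers the full nonce (salt plus
+ * per-packet IV), but the salt is programmed separately, so only the 8B
+ * per-packet IV length (sizeof(uint64_t)) is reported above. For example,
+ * AES-GCM for ESP uses a 12B nonce built from a 4B salt and an 8B IV.
+ */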
+
+static void
+sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_cipher_xform *cipher, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_CIPHER;
+
+	switch (cipher->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cfg->algo_type = VIRTCHNL_AES_CBC; break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cfg->algo_type = VIRTCHNL_3DES_CBC; break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cfg->algo_type = VIRTCHNL_AES_CTR;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid cipher parameters");
+		break;
+	}
+
+	cfg->key_len = cipher->key.length;
+	cfg->iv_len = cipher->iv.length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_auth_xform *auth, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AUTH;
+
+	switch (auth->algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+		cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		cfg->algo_type = VIRTCHNL_AES_CMAC; break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		cfg->algo_type = VIRTCHNL_AES_GMAC;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid auth parameters");
+		break;
+	}
+
+	cfg->key_len = auth->key.length;
+	/* special case for RTE_CRYPTO_AUTH_AES_GMAC */
+	if (auth->algo == RTE_CRYPTO_AUTH_AES_GMAC)
+		cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt */
+	else
+		cfg->iv_len = auth->iv.length;
+	cfg->digest_len = auth->digest_length;
+
+	memcpy(cfg->key_data, auth->key.data, cfg->key_len);
+}
+
+/**
+ * Send SA add virtual channel request to Inline IPsec driver.
+ *
+ * The inline IPsec driver expects the SPI and destination IP address in host
+ * order, but the DPDK API provides them in network order, so we need to do a
+ * byte-order (htonl) conversion of these parameters.
+ */
+static int
+iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
+	struct rte_security_session_conf *conf)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	struct virtchnl_ipsec_sa_cfg *sa_cfg;
+	size_t request_len, response_len;
+
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg);
+
+	request = rte_malloc("iavf-sad-add-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
+	response = rte_malloc("iavf-sad-add-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set SA configuration params */
+	sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
+
+	sa_cfg->spi = conf->ipsec.spi;
+	sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
+	sa_cfg->virtchnl_direction =
+		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+			VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
+
+	if (conf->ipsec.options.esn) {
+		sa_cfg->esn_enabled = 1;
+		sa_cfg->esn_hi = conf->ipsec.esn.hi;
+		sa_cfg->esn_low = conf->ipsec.esn.low;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sa_cfg->udp_encap_enabled = 1;
+
+	/* Set outer IP params */
+	if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
+
+		*((uint32_t *)sa_cfg->dst_addr)	=
+			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
+	} else {
+		uint32_t *v6_dst_addr =
+			conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
+
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
+
+		((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
+		((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
+		((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
+		((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
+	}
+
+	/* set crypto params */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->aead, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->cipher, conf->ipsec.salt);
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->auth, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->auth, conf->ipsec.salt);
+		if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
+			sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->cipher, conf->ipsec.salt);
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sa_cfg_resp->sa_handle;
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static void
+set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
+	struct iavf_security_session *sess)
+{
+	template->sa_idx = sess->sa.hw_idx;
+	template->ol_flags = 0;
+
+	if (sess->udp_encap.enabled)
+		template->ol_flags |= IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
+
+	if (sess->esn.enabled)
+		template->ol_flags |= IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
+
+	template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
+	template->ctx_desc_ipsec_params =
+			calc_context_desc_cipherblock_sz(sess->block_sz) |
+			((uint8_t)(sess->icv_sz >> 2) << 3);
+}
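+
+/*
+ * Worked example (illustrative), assuming an AES-GCM session with an 8B IV,
+ * a 16B ICV and a 16B cipher block:
+ *   len_iv                = (8 << 2) | IAVF_IPSEC_IV_LEN_DDW     = 0x22
+ *   ctx_desc_ipsec_params = cipherblock(16B) | ((16 >> 2) << 3)  = 0x23
+ */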
+
+static void
+set_session_parameter(struct iavf_security_ctx *iavf_sctx,
+	struct iavf_security_session *sess,
+	struct rte_security_session_conf *conf, uint32_t sa_idx)
+{
+	sess->adapter = iavf_sctx->adapter;
+
+	sess->mode = conf->ipsec.mode;
+	sess->direction = conf->ipsec.direction;
+
+	if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		sess->type = conf->ipsec.tunnel.type;
+
+	sess->sa.spi = conf->ipsec.spi;
+	sess->sa.hw_idx = sa_idx;
+
+	if (conf->ipsec.options.esn) {
+		sess->esn.enabled = 1;
+		sess->esn.value = conf->ipsec.esn.value;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sess->udp_encap.enabled = 1;
+
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sess->block_sz = get_aead_blocksize(iavf_sctx,
+			conf->crypto_xform->aead.algo);
+		sess->iv_sz = sizeof(uint64_t); /* iv.length includes salt */
+		sess->icv_sz = conf->crypto_xform->aead.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sess->block_sz = get_cipher_blocksize(iavf_sctx,
+			conf->crypto_xform->cipher.algo);
+		sess->iv_sz = conf->crypto_xform->cipher.iv.length;
+		sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			sess->block_sz = get_auth_blocksize(iavf_sctx,
+				RTE_CRYPTO_SYM_XFORM_AUTH);
+			sess->iv_sz = conf->crypto_xform->auth.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		} else {
+			sess->block_sz = get_cipher_blocksize(iavf_sctx,
+				conf->crypto_xform->next->cipher.algo);
+			sess->iv_sz =
+				conf->crypto_xform->next->cipher.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		}
+	}
+
+	set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
+}
+
+/**
+ * Create IPsec Security Association for inline IPsec Crypto offload.
+ *
+ * 1. validate session configuration parameters
+ * 2. allocate session memory from mempool
+ * 3. add SA to hardware database
+ * 4. set session parameters
+ * 5. create packet metadata template for datapath
+ */
+static int
+iavf_ipsec_crypto_session_create(void *device,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *session,
+				 struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_session = NULL;
+	int sa_idx;
+	int ret = 0;
+
+	/* validate that all SA parameters are valid for device */
+	ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
+	if (ret)
+		return ret;
+
+	/* allocate session context */
+	if (rte_mempool_get(mempool, (void **)&iavf_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
+		return -ENOMEM;
+	}
+
+	/* add SA to hardware database */
+	sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
+	if (sa_idx < 0) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add SA (spi: %d, mode: %s, direction: %s)",
+			conf->ipsec.spi,
+			conf->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
+				"transport" : "tunnel",
+			conf->ipsec.direction ==
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+				"inbound" : "outbound");
+
+		rte_mempool_put(mempool, iavf_session);
+		return -EFAULT;
+	}
+
+	/* save data plane required session parameters */
+	set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
+
+	/* save to security session private data */
+	set_sec_session_private_data(session, iavf_session);
+
+	return 0;
+}
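+
+/*
+ * Typical application usage (illustrative sketch only; local variable names
+ * are hypothetical):
+ *
+ *   void *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
+ *   struct rte_security_session *sess =
+ *       rte_security_session_create(sec_ctx, &conf, sess_mp, sess_priv_mp);
+ *
+ * For ingress SAs this is followed by an rte_flow rule whose action list is
+ * { SECURITY(sess), END }, which lets the flow engine below install the
+ * matching inbound security policy.
+ */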
+
+/**
+ * Check if valid ipsec crypto action.
+ * SPI must be non-zero and SPI in session must match SPI value
+ * passed into function.
+ *
+ * returns: 0 if the session is invalid or the SPI value equals zero
+ * returns: 1 if valid
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_session *sess = session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(sess == NULL || sess->adapter != adapter))
+		return false;
+
+	/* SPI value must be non-zero */
+	if (spi == 0)
+		return false;
+	/* Session SPI must match flow SPI */
+	else if (sess->sa.spi == spi) {
+		return true;
+		/**
+		 * TODO: We should add a way of tracking valid hw SA indices to
+		 * make validation less brittle
+		 */
+	}
+
+	return true;
+}
+
+/**
+ * Send virtual channel security policy add request to IES driver.
+ *
+ * The IES driver expects the SPI and destination IP address in host order,
+ * but the DPDK API provides them in network order, so we need to do a
+ * byte-order (htonl) conversion of these parameters.
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg);
+	request = rte_malloc("iavf-inbound-security-policy-add-request",
+				request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* ESP SPI */
+	request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
+
+	/* Destination IP  */
+	if (is_v4) {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
+		request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
+	} else {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+		request->ipsec_data.sp_cfg->dip[0] =
+				htonl(((uint32_t *)v6_dst_addr)[0]);
+		request->ipsec_data.sp_cfg->dip[1] =
+				htonl(((uint32_t *)v6_dst_addr)[1]);
+		request->ipsec_data.sp_cfg->dip[2] =
+				htonl(((uint32_t *)v6_dst_addr)[2]);
+		request->ipsec_data.sp_cfg->dip[3] =
+				htonl(((uint32_t *)v6_dst_addr)[3]);
+	}
+
+	request->ipsec_data.sp_cfg->drop = drop;
+
+	/** Traffic Class/Congestion Domain currently not supported */
+	request->ipsec_data.sp_cfg->set_tc = 0;
+	request->ipsec_data.sp_cfg->cgd = 0;
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
+	response = rte_malloc("iavf-inbound-security-policy-add-response",
+				response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* send virtual channel request to add the inbound security policy */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sp_cfg_resp->rule_id;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_update);
+	request = rte_malloc("iavf-sa-update-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sa-update-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set request params */
+	request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
+	request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
+
+	/* send virtual channel request to update the SA entry */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_resp->resp;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_update(void *device,
+		struct rte_security_session *session,
+		struct rte_security_session_conf *conf)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int rc = 0;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* update esn hi 32-bits */
+	if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
+		/**
+		 * Update ESN in hardware for inbound SA. Store in
+		 * iavf_security_session for outbound SA for use
+		 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
+		 */
+		if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+			rc = iavf_ipsec_crypto_sa_update_esn(adapter,
+					iavf_sess);
+		else
+			iavf_sess->esn.hi = conf->ipsec.esn.hi;
+	}
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
+		struct rte_security_session *session __rte_unused,
+		struct rte_security_stats *stats __rte_unused)
+{
+	return -EOPNOTSUPP;
+}
+
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_destroy);
+	request = rte_malloc("iavf-sp-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sp-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set security policy params */
+	request->ipsec_data.sp_destroy->table_id = is_v4 ?
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+	request->ipsec_data.sp_destroy->rule_id = flow_id;
+
+	/* send virtual channel request to delete the security policy */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		return response->ipsec_data.ipsec_status->status;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_destroy);
+
+	request = rte_malloc("iavf-sa-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+
+	response = rte_malloc("iavf-sa-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/**
+	 * SA delete supports deletion of 1 to 8 specified SAs; if the flag
+	 * field is zero, all SAs associated with the VF will be deleted.
+	 */
+	if (sess) {
+		request->ipsec_data.sa_destroy->flag = 0x1;
+		request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
+	} else {
+		request->ipsec_data.sa_destroy->flag = 0x0;
+	}
+
+	/* send virtual channel request to delete the SA entry */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+
+	/**
+	 * The delete status will be the same bitmask as the sa_destroy request
+	 * flag if the delete was successful.
+	 */
+	if (request->ipsec_data.sa_destroy->flag !=
+			response->ipsec_data.ipsec_status->status)
+		rc = -EFAULT;
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_destroy(void *device,
+		struct rte_security_session *session)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int ret;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belongs to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
+	rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
+	return ret;
+}
+
+/**
+ * Get ESP trailer from packet as well as calculate the total ESP trailer
+ * length, which include padding, ESP trailer footer and the ICV
+ */
+static inline struct rte_esp_tail *
+iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t *esp_trailer_length)
+{
+	struct rte_esp_tail *esp_trailer;
+
+	uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
+	uint16_t offset = 0;
+
+	/**
+	 * The ICV will not be present in TSO packets as this is appended by
+	 * hardware during segment generation
+	 */
+	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
+		length -= s->icv_sz;
+
+	*esp_trailer_length = length;
+
+	/**
+	 * Calculate offset in packet to ESP trailer header, this should be
+	 * total packet length less the size of the ESP trailer plus the ICV
+	 * length if it is present
+	 */
+	offset = rte_pktmbuf_pkt_len(m) - length;
+
+	if (m->nb_segs > 1) {
+		/* find the segment in which the ESP trailer is located */
+		while (m->data_len < offset) {
+			offset -= m->data_len;
+			m = m->next;
+		}
+	}
+
+	esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
+
+	*esp_trailer_length += esp_trailer->pad_len;
+
+	return esp_trailer;
+}
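+
+/*
+ * Example (illustrative): for a non-TSO packet with a 16B ICV and 2B of ESP
+ * padding, *esp_trailer_length works out as sizeof(struct rte_esp_tail) (2B)
+ * + 16B ICV + 2B padding = 20B.
+ */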
+
+static inline uint16_t
+iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t esp_tlen)
+{
+	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
+	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
+	uint16_t ol4_len = 0;		/* UDP NATT */
+	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
+	uint16_t l4_len = 0;		/* TCP/UDP/SCTP hdrs */
+	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
+
+	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ol3_len = m->outer_l3_len;
+		/**<
+		 * application provided l3len assumed to include length of
+		 * ipv4/6 hdr + ext hdrs
+		 */
+
+	if (s->udp_encap.enabled)
+		ol4_len = sizeof(struct rte_udp_hdr);
+
+	l3_len = m->l3_len;
+	l4_len = m->l4_len;
+
+	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
+			esp_hlen + l3_len + l4_len + esp_tlen);
+}
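+
+/*
+ * In other words (illustrative): for a tunnel mode NAT-T packet the L4
+ * payload length is
+ *   pkt_len - (L2 + outer L3 + NAT-T UDP + ESP hdr + IV + inner L3 + L4
+ *              + ESP trailer)
+ * with the outer L3 and NAT-T UDP terms dropping out for transport mode and
+ * non-encapsulated SAs respectively.
+ */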
+
+static int
+iavf_ipsec_crypto_pkt_metadata_set(void *device,
+			 struct rte_security_session *session,
+			 struct rte_mbuf *m, void *params)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_sess = session->sess_private_data;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
+	struct rte_esp_tail *esp_tail;
+	uint64_t *sqn = params;
+	uint16_t esp_trailer_length;
+
+	/* Check that we have a valid session and that it belongs to this device */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* Get dynamic metadata location from mbuf */
+	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
+		struct iavf_ipsec_crypto_pkt_metadata *);
+
+	/* Set immutable metadata values from the session template */
+	memcpy(md, &iavf_sess->pkt_metadata_template,
+		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
+
+	esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
+			&esp_trailer_length);
+
+	/* Set per packet mutable metadata values */
+	md->esp_trailer_len = esp_trailer_length;
+	md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
+				iavf_sess, esp_trailer_length);
+	md->next_proto = esp_tail->next_proto;
+
+	/* If Extended SN in use set the upper 32-bits in metadata */
+	if (iavf_sess->esn.enabled && sqn != NULL)
+		md->esn = (uint32_t)(*sqn >> 32);
+
+	return 0;
+}
+
+static int
+iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
+		struct virtchnl_ipsec_cap *capability)
+{
+	/* Perform pf-vf comms */
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg);
+
+	request = rte_malloc("iavf-device-capability-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_cap);
+	response = rte_malloc("iavf-device-capability-response",
+			response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* send virtual channel request to get the device IPsec capabilities */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id) {
+		rc = -EFAULT;
+		goto update_cleanup;
+	}
+	memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+enum rte_crypto_auth_algorithm auth_maptbl[] = {
+	/* Hash Algorithm */
+	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
+	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
+	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
+	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
+	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
+	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
+	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
+	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
+	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
+	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+};
+
+static void
+update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
+		struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	capability->auth.algo = auth_maptbl[acap->algo_type];
+	capability->auth.block_size = acap->block_size;
+
+	capability->auth.key_size.min = acap->min_key_size;
+	capability->auth.key_size.max = acap->max_key_size;
+	capability->auth.key_size.increment = acap->inc_key_size;
+
+	capability->auth.digest_size.min = acap->min_digest_size;
+	capability->auth.digest_size.max = acap->max_digest_size;
+	capability->auth.digest_size.increment = acap->inc_digest_size;
+}
+
+enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
+	/* Cipher Algorithm */
+	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
+	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
+	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
+	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
+};
+
+static void
+update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+	capability->cipher.algo = cipher_maptbl[acap->algo_type];
+
+	capability->cipher.block_size = acap->block_size;
+
+	capability->cipher.key_size.min = acap->min_key_size;
+	capability->cipher.key_size.max = acap->max_key_size;
+	capability->cipher.key_size.increment = acap->inc_key_size;
+
+	capability->cipher.iv_size.min = acap->min_iv_size;
+	capability->cipher.iv_size.max = acap->max_iv_size;
+	capability->cipher.iv_size.increment = acap->inc_iv_size;
+}
+
+enum rte_crypto_aead_algorithm aead_maptbl[] = {
+	/* AEAD Algorithm */
+	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
+	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
+	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+};
+
+static void
+update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	capability->aead.algo = aead_maptbl[acap->algo_type];
+
+	capability->aead.block_size = acap->block_size;
+
+	capability->aead.key_size.min = acap->min_key_size;
+	capability->aead.key_size.max = acap->max_key_size;
+	capability->aead.key_size.increment = acap->inc_key_size;
+
+	capability->aead.aad_size.min = acap->min_aad_size;
+	capability->aead.aad_size.max = acap->max_aad_size;
+	capability->aead.aad_size.increment = acap->inc_aad_size;
+
+	capability->aead.iv_size.min = acap->min_iv_size;
+	capability->aead.iv_size.max = acap->max_iv_size;
+	capability->aead.iv_size.increment = acap->inc_iv_size;
+
+	capability->aead.digest_size.min = acap->min_digest_size;
+	capability->aead.digest_size.max = acap->max_digest_size;
+	capability->aead.digest_size.increment = acap->inc_digest_size;
+}
+
+/**
+ * Dynamically set crypto capabilities based on virtchannel IPsec
+ * capabilities structure.
+ */
+int
+iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
+{
+	struct rte_cryptodev_capabilities *capabilities;
+	int i, j, number_of_capabilities = 0, ci = 0;
+
+	/* Count the total number of crypto algorithms supported */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
+		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
+
+	/**
+	 * Allocate cryptodev capabilities structure for
+	 * *number_of_capabilities* items plus one item to null terminate the
+	 * array
+	 */
+	capabilities = rte_zmalloc("crypto_cap",
+		sizeof(struct rte_cryptodev_capabilities) *
+		(number_of_capabilities + 1), 0);
+	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
+
+	/**
+	 * Iterate over each virtchl crypto capability by crypto type and
+	 * algorithm.
+	 */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
+		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
+			switch (vch_cap->cap[i].crypto_type) {
+			case VIRTCHNL_AUTH:
+				update_auth_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_CIPHER:
+				update_cipher_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_AEAD:
+				update_aead_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			default:
+				capabilities[ci].op =
+						RTE_CRYPTO_OP_TYPE_UNDEFINED;
+				break;
+			}
+		}
+	}
+
+	iavf_sctx->crypto_capabilities = capabilities;
+	return 0;
+}
+
+/**
+ * Get security capabilities for device
+ */
+static const struct rte_security_capability *
+iavf_ipsec_crypto_capabilities_get(void *device)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	unsigned int i;
+
+	static struct rte_security_capability iavf_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 }
+			},
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	/**
+	 * Update the security capabilities struct with the runtime discovered
+	 * crypto capabilities, except for the last element of the array, which
+	 * is the null termination.
+	 */
+	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
+			sizeof(iavf_security_capabilities[0])) - 1); i++) {
+		iavf_security_capabilities[i].crypto_capabilities =
+			iavf_sctx->crypto_capabilities;
+	}
+
+	return iavf_security_capabilities;
+}
+
+static struct rte_security_ops iavf_ipsec_crypto_ops = {
+	.session_get_size		= iavf_ipsec_crypto_session_size_get,
+	.session_create			= iavf_ipsec_crypto_session_create,
+	.session_update			= iavf_ipsec_crypto_session_update,
+	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
+	.session_destroy		= iavf_ipsec_crypto_session_destroy,
+	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
+	.get_userdata			= NULL,
+	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
+};
+
+int
+iavf_security_ctx_create(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx;
+
+	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
+	if (sctx == NULL)
+		return -ENOMEM;
+
+	sctx->device = adapter->vf.eth_dev;
+	sctx->ops = &iavf_ipsec_crypto_ops;
+	sctx->sess_cnt = 0;
+
+	adapter->vf.eth_dev->security_ctx = sctx;
+
+	if (adapter->security_ctx == NULL) {
+		adapter->security_ctx = rte_malloc("iavf_security_ctx",
+				sizeof(struct iavf_security_ctx), 0);
+		if (adapter->security_ctx == NULL)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int
+iavf_security_init(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct rte_mbuf_dynfield pkt_md_dynfield = {
+		.name = "iavf_ipsec_crypto_pkt_metadata",
+		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
+		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
+	};
+	struct virtchnl_ipsec_cap capabilities;
+	int rc;
+
+	iavf_sctx->adapter = adapter;
+
+	iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
+	if (iavf_sctx->pkt_md_offset < 0)
+		return iavf_sctx->pkt_md_offset;
+
+	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
+	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
+	if (rc)
+		return rc;
+
+	return iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
+			&capabilities);
+}
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	return iavf_sctx->pkt_md_offset;
+}
+
+int
+iavf_security_ctx_destroy(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx = adapter->vf.eth_dev->security_ctx;
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	if (iavf_sctx == NULL)
+		return -ENODEV;
+
+	/* TODO: Add resources cleanup */
+
+	/* free and reset security data structures */
+	rte_free(iavf_sctx);
+	rte_free(sctx);
+
+	adapter->security_ctx = NULL;
+	adapter->vf.eth_dev->security_ctx = NULL;
+
+	return 0;
+}
+
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
+{
+	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
+
+	/** Capability check for IPsec Crypto */
+	if (resources && (resources->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
+		return true;
+
+	return false;
+}
+
+#define IAVF_IPSEC_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
+enum iavf_ipsec_flow_pt_type {
+	IAVF_PATTERN_ESP = 1,
+	IAVF_PATTERN_AH,
+	IAVF_PATTERN_UDP_ESP,
+};
+enum iavf_ipsec_flow_pt_ip_ver {
+	IAVF_PATTERN_IPV4 = 1,
+	IAVF_PATTERN_IPV6,
+};
+
+#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
+#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
+#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
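+
+/*
+ * Example (illustrative): IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)
+ * packs to 0x23; IAVF_PATTERN_TYPE() then recovers IAVF_PATTERN_UDP_ESP (3)
+ * from the low nibble and IAVF_PATTERN_IP_V() recovers IAVF_PATTERN_IPV6 (2)
+ * from the high nibble.
+ */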
+
+static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
+	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
+};
+
+struct iavf_ipsec_flow_item {
+	uint64_t id;
+	uint8_t is_ipv4;
+	uint32_t spi;
+	struct rte_ether_hdr eth_hdr;
+	union {
+		struct rte_ipv4_hdr ipv4_hdr;
+		struct rte_ipv6_hdr ipv6_hdr;
+	};
+	struct rte_udp_hdr udp_hdr;
+};
+
+static void
+parse_eth_item(const struct rte_flow_item_eth *item,
+		struct rte_ether_hdr *eth)
+{
+	memcpy(eth->src_addr.addr_bytes,
+			item->src.addr_bytes, sizeof(eth->src_addr));
+	memcpy(eth->dst_addr.addr_bytes,
+			item->dst.addr_bytes, sizeof(eth->dst_addr));
+}
+
+static void
+parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
+		struct rte_ipv4_hdr *ipv4)
+{
+	ipv4->src_addr = item->hdr.src_addr;
+	ipv4->dst_addr = item->hdr.dst_addr;
+}
+
+static void
+parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
+		struct rte_ipv6_hdr *ipv6)
+{
+	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
+	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+}
+
+static void
+parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
+{
+	udp->dst_port = item->hdr.dst_port;
+	udp->src_port = item->hdr.src_port;
+}
+
+static int
+has_security_action(const struct rte_flow_action actions[],
+	const void **session)
+{
+	/* only {SECURITY; END} supported */
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
+		*session = actions[0].conf;
+		return true;
+	}
+	return false;
+}
+
+static struct iavf_ipsec_flow_item *
+iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t type)
+{
+	const void *session;
+	struct iavf_ipsec_flow_item
+		*ipsec_flow = rte_malloc("security-flow-rule",
+		sizeof(struct iavf_ipsec_flow_item), 0);
+	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
+	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
+
+	if (ipsec_flow == NULL)
+		return NULL;
+
+	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
+
+	if (pattern[0].spec)
+		parse_eth_item((const struct rte_flow_item_eth *)
+				pattern[0].spec, &ipsec_flow->eth_hdr);
+
+	switch (p_type) {
+	case IAVF_PATTERN_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[2].spec)->hdr.spi;
+		break;
+	case IAVF_PATTERN_AH:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_ah *)
+					pattern[2].spec)->spi;
+		break;
+	case IAVF_PATTERN_UDP_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		parse_udp_item((const struct rte_flow_item_udp *)
+				pattern[2].spec,
+			&ipsec_flow->udp_hdr);
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[3].spec)->hdr.spi;
+		break;
+	default:
+		goto flow_cleanup;
+	}
+
+	if (!has_security_action(actions, &session))
+		goto flow_cleanup;
+
+	if (!iavf_ipsec_crypto_action_valid(ethdev, session,
+			ipsec_flow->spi))
+		goto flow_cleanup;
+
+	return ipsec_flow;
+
+flow_cleanup:
+	rte_free(ipsec_flow);
+	return NULL;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser;
+
+static int
+iavf_ipsec_flow_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
+		parser = &iavf_ipsec_flow_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
+}
+
+static int
+iavf_ipsec_flow_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = meta;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	if (ipsec_flow->is_ipv4) {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			1,
+			ipsec_flow->ipv4_hdr.dst_addr,
+			NULL,
+			0);
+	} else {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			0,
+			0,
+			ipsec_flow->ipv6_hdr.dst_addr,
+			0);
+	}
+
+	if (ipsec_flow->id < 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Failed to add SA.");
+		return -rte_errno;
+	}
+
+	flow->rule = ipsec_flow;
+
+	return 0;
+}
+
+static int
+iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	iavf_ipsec_crypto_security_policy_delete(ad,
+			ipsec_flow->is_ipv4, ipsec_flow->id);
+	rte_free(ipsec_flow);
+	return 0;
+}
+
+static struct iavf_flow_engine iavf_ipsec_flow_engine = {
+	.init = iavf_ipsec_flow_init,
+	.uninit = iavf_ipsec_flow_uninit,
+	.create = iavf_ipsec_flow_create,
+	.destroy = iavf_ipsec_flow_destroy,
+	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
+};
+
+static int
+iavf_ipsec_flow_parse(struct iavf_adapter *ad,
+		       struct iavf_pattern_match_item *array,
+		       uint32_t array_len,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta,
+		       struct rte_flow_error *error)
+{
+	struct iavf_pattern_match_item *item = NULL;
+	int ret = -1;
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (item && item->meta) {
+		uint32_t type = (uint64_t)(item->meta);
+		struct iavf_ipsec_flow_item *fi =
+				iavf_ipsec_flow_item_parse(ad->vf.eth_dev,
+						pattern, actions, type);
+		if (fi && meta) {
+			*meta = fi;
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser = {
+	.engine = &iavf_ipsec_flow_engine,
+	.array = iavf_ipsec_flow_pattern,
+	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
+	.parse_pattern_action = iavf_ipsec_flow_parse,
+	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
+};
+
+RTE_INIT(iavf_ipsec_flow_engine_register)
+{
+	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
+}
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
new file mode 100644
index 0000000000..4e4c8798ec
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_H_
+#define _IAVF_IPSEC_CRYPTO_H_
+
+#include <rte_security.h>
+
+#include "iavf.h"
+
+
+
+struct iavf_tx_ipsec_desc {
+	union {
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le16 l4payload_length;
+			__le32 esn;
+			__le16 trailer_length;
+			u8 type:4;
+			u8 rsv:1;
+			u8 udp:1;
+			u8 ivlen:2;
+			u8 next_header;
+			__le16 ipv6_ext_hdr_length;
+			__le32 said;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
+			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
+
+/* Initialization Vector Length type */
+enum iavf_ipsec_iv_len {
+	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
+	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
+	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
+	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
+};
+
+
+/* IPsec Crypto Packet Metadata offload flags */
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
+
+/**
+ * Packet metadata structure used to hold the parameters required by the iAVF
+ * transmit data path. The parameters are set for the session by calling the
+ * rte_security_set_pkt_metadata() API.
+ */
+struct iavf_ipsec_crypto_pkt_metadata {
+	uint32_t sa_idx;                /* SA hardware index (20b/4B) */
+
+	uint8_t ol_flags;		/* flags (1B) */
+	uint8_t len_iv;			/* IV length (2b/1B) */
+	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
+	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
+
+	uint16_t l4_payload_len;	/* L4 payload length */
+	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extension headers len (5b/1B) */
+	uint8_t next_proto;		/* Next Protocol (8b/1B) */
+
+	uint32_t esn;		        /* Extended Sequence Number (32b/4B) */
+} __rte_packed;
+
+/**
+ * Check whether inline IPsec Crypto offload is supported
+ */
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
+
+/**
+ * Create security context
+ */
+int iavf_security_ctx_create(struct iavf_adapter *adapter);
+
+/**
+ * Initialize security context
+ */
+int iavf_security_init(struct iavf_adapter *adapter);
+
+/**
+ * Set security capabilities
+ */
+int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchl_capabilities);
+
+
+int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+/**
+ * Destroy security context
+ */
+int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
+
+/**
+ * Verify that the inline IPsec Crypto action is valid for this device
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi);
+
+/**
+ * Add inbound security policy rule to hardware
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop);
+
+/**
+ * Delete inbound security policy rule from hardware
+ */
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id);
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+#endif /* _IAVF_IPSEC_CRYPTO_H_ */
diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
new file mode 100644
index 0000000000..70ce8dd638
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC MAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 },
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* ChaCha20-Poly1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 16,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	}
+};
+
+
+#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 9663e6514c..1b0b869239 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -27,6 +27,7 @@
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
+#include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
@@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 uint8_t
 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
@@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
 		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
 		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
 		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
+				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
 	};
 
 	return flex_type < RTE_DIM(rxdid_map) ?
@@ -508,6 +512,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields =
 			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
+	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+		rxq->xtr_ol_flag =
+			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+		rxq->rxd_to_pkt_fields =
+			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
 	case IAVF_RXDID_COMMS_OVS_1:
 		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
 		break;
@@ -692,6 +702,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		       const struct rte_eth_txconf *tx_conf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
@@ -736,9 +748,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		struct virtchnl_vlan_supported_caps *insertion_support =
-			&vf->vlan_v2_caps.offloads.insertion_support;
+			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
 		uint32_t insertion_cap;
 
 		if (insertion_support->outer)
@@ -762,6 +774,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
+	if (iavf_ipsec_crypto_supported(adapter))
+		txq->ipsec_crypto_pkt_md_offset =
+			iavf_security_get_pkt_md_offset(adapter);
+
 	/* Allocate software ring */
 	txq->sw_ring =
 		rte_zmalloc_socket("iavf tx sw ring",
@@ -1084,6 +1100,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
 #endif
 }
 
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
+
+	mb->dynfield1[0] = desc->ipsec_said &
+			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
+}
+
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp,
+			  struct iavf_ipsec_crypto_stats *stats)
+{
+	uint16_t status1 = rte_le_to_cpu_64(rxdp->wb.status_error1);
+
+	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
+		uint16_t ipsec_status;
+
+		mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
+
+		ipsec_status = status1 &
+			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
+
+
+		if (unlikely(ipsec_status !=
+			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
+			mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
+
+			switch (ipsec_status) {
+			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
+				stats->ierrors.sad_miss++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
+				stats->ierrors.not_processed++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
+				stats->ierrors.icv_check++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
+				stats->ierrors.ipsec_length++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
+				stats->ierrors.misc++;
+				break;
+			}
+
+			stats->ierrors.count++;
+			return;
+		}
+
+		stats->icount++;
+		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
+
+		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
+			ipsec_status !=
+				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
+			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
+	}
+}
+
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
@@ -1402,6 +1482,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1544,6 +1626,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1782,6 +1866,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+				&rxq->stats.ipsec_crypto);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2094,6 +2180,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
 	*field |= cmd;
 }
 
+static inline void
+iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+{
+	uint64_t ipsec_field =
+		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+
+	*field |= ipsec_field;
+}
+
+
 static inline void
 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 		const struct rte_mbuf *m)
@@ -2127,15 +2225,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 
 static inline uint16_t
 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
-	struct rte_mbuf *m)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
 	uint64_t segmentation_field = 0;
 	uint64_t total_length = 0;
 
-	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+	if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+		total_length = ipsec_md->l4_payload_len;
+	} else {
+		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
-	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
-		total_length -= m->outer_l3_len;
+		if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+			total_length -= m->outer_l3_len;
+	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
 	if (!m->l4_len || !m->tso_segsz)
@@ -2164,7 +2266,8 @@ struct iavf_tx_context_desc_qws {
 
 static inline void
 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
-	struct rte_mbuf *m, uint16_t *tlen)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
+	uint16_t *tlen)
 {
 	volatile struct iavf_tx_context_desc_qws *desc_qws =
 			(volatile struct iavf_tx_context_desc_qws *)desc;
@@ -2176,8 +2279,13 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 
 	/* fill segmentation field */
 	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
+		/* fill IPsec field */
+		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
+			iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
+				ipsec_md);
+
 		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
-				m);
+				m, ipsec_md);
 	}
 
 	/* fill tunnelling field */
@@ -2191,6 +2299,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 }
 
 
+static inline void
+iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
+	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
+{
+	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
+		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
+		((uint64_t)md->esp_trailer_len <<
+				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
+
+	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
+		((uint64_t)md->next_proto <<
+				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
+		((uint64_t)(md->len_iv & 0x3) <<
+				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
+		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+				1ULL : 0ULL) <<
+				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
+		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+
+	/**
+	 * TODO: Pre-calculate this in the Session initialization
+	 *
+	 * Calculate IPsec length required in data descriptor func when TSO
+	 * offload is enabled
+	 */
+	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
+			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+			sizeof(struct rte_udp_hdr) : 0);
+}
+
 static inline void
 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		struct rte_mbuf *m)
@@ -2303,6 +2443,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
 }
 
 
+static struct iavf_ipsec_crypto_pkt_metadata *
+iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
+		struct rte_mbuf *m)
+{
+	if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
+		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
+				struct iavf_ipsec_crypto_pkt_metadata *);
+
+	return NULL;
+}
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -2331,7 +2482,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	for (idx = 0; idx < nb_pkts; idx++) {
 		volatile struct iavf_tx_desc *ddesc;
-		uint16_t nb_desc_ctx;
+		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
+
+		uint16_t nb_desc_ctx, nb_desc_ipsec;
 		uint16_t nb_desc_data, nb_desc_required;
 		uint16_t tlen = 0, ipseclen = 0;
 		uint64_t ddesc_template = 0;
@@ -2341,17 +2494,24 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+		/**
+		 * Get metadata for ipsec crypto from mbuf dynamic fields if
+		 * security offload is specified.
+		 */
+		ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);
+
 		nb_desc_data = mb->nb_segs;
 		nb_desc_ctx = !!(mb->ol_flags &
 			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
 					RTE_MBUF_F_TX_TUNNEL_MASK));
+		nb_desc_ipsec = !!(mb->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
 
 		/**
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx;
+		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2402,7 +2562,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txe->mbuf = NULL;
 			}
 
-			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
 			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
 			txe->last_id = desc_idx_last;
@@ -2410,7 +2570,27 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe = txn;
 			}
 
+		if (nb_desc_ipsec) {
+			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
+				(volatile struct iavf_tx_ipsec_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
+			if (txe->mbuf) {
+				rte_pktmbuf_free_seg(txe->mbuf);
+				txe->mbuf = NULL;
+			}
+
+			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
+		}
 
 		mb_seg = mb;
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 1da1278452..b88c81f8f6 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -25,7 +25,8 @@
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
-		RTE_ETH_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |		 \
+		RTE_ETH_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
 		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		 \
@@ -36,10 +37,10 @@
 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 
 #define IAVF_RX_VECTOR_OFFLOAD (				 \
-		RTE_ETH_RX_OFFLOAD_CHECKSUM |		 \
-		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		 \
-		RTE_ETH_RX_OFFLOAD_VLAN |		 \
-		RTE_ETH_RX_OFFLOAD_RSS_HASH)
+		DEV_RX_OFFLOAD_CHECKSUM |		 \
+		DEV_RX_OFFLOAD_SCTP_CKSUM |		 \
+		DEV_RX_OFFLOAD_VLAN |		 \
+		DEV_RX_OFFLOAD_RSS_HASH)
 
 #define IAVF_VECTOR_PATH 0
 #define IAVF_VECTOR_OFFLOAD_PATH 1
@@ -47,23 +48,26 @@
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          88
+#define IAVF_MIN_TSO_MSS          256
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
 
-#define IAVF_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM |		 \
+#define IAVF_TX_CKSUM_OFFLOAD_MASK (		 \
+		RTE_MBUF_F_TX_IP_CKSUM |		 \
 		RTE_MBUF_F_TX_L4_MASK |		 \
 		RTE_MBUF_F_TX_TCP_SEG)
 
-#define IAVF_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 |		 \
+#define IAVF_TX_OFFLOAD_MASK (  \
+		RTE_MBUF_F_TX_OUTER_IPV6 |		 \
 		RTE_MBUF_F_TX_OUTER_IPV4 |		 \
 		RTE_MBUF_F_TX_IPV6 |			 \
 		RTE_MBUF_F_TX_IPV4 |			 \
 		RTE_MBUF_F_TX_VLAN |		 \
 		RTE_MBUF_F_TX_IP_CKSUM |		 \
 		RTE_MBUF_F_TX_L4_MASK |		 \
-		RTE_MBUF_F_TX_TCP_SEG)
+		RTE_MBUF_F_TX_TCP_SEG |		 \
+		RTE_MBUF_F_TX_SEC_OFFLOAD)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
@@ -161,6 +165,24 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_rx_queue_stats {
+	uint64_t reserved;
+	struct iavf_ipsec_crypto_stats ipsec_crypto;
+};
+
 /* Structure associated with each Rx queue. */
 struct iavf_rx_queue {
 	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
@@ -209,6 +231,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 				/* handle flexible descriptor by RXDID */
+	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
 };
 
@@ -243,6 +266,7 @@ struct iavf_tx_queue {
 	uint64_t offloads;
 	uint16_t next_dd;              /* next to set RS, for VPMD */
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
+	uint16_t ipsec_crypto_pkt_md_offset;
 
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
@@ -345,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
 	} flex_ts;
 };
 
+/* Rx Flex Descriptor
+ * RxDID Profile ID 24 Inline IPsec
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: Inline IPsec SAID lower 16-bits
+ * Flex-field 5: Inline IPsec SAID upper 16-bits
+ */
+struct iavf_32b_rx_flex_desc_comms_ipsec {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	__le32 ipsec_said;
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -364,6 +422,7 @@ enum iavf_rxdid {
 	IAVF_RXDID_COMMS_AUX_TCP	= 21,
 	IAVF_RXDID_COMMS_OVS_1		= 22,
 	IAVF_RXDID_COMMS_OVS_2		= 23,
+	IAVF_RXDID_COMMS_IPSEC_CRYPTO	= 24,
 	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
 	IAVF_RXDID_LAST			= 63,
 };
@@ -391,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
 
 enum iavf_rx_flex_desc_status_error_1_bits {
 	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
 	/* [10:6] reserved */
 	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
 	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
@@ -403,6 +466,23 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
+
+enum iavf_rx_flex_desc_ipsec_crypto_status {
+	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
+	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
+	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
+	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
+	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
+	/* Reserved */
+	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
+};
+
+
 
 #define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
 #define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
@@ -670,6 +750,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	case IAVF_TX_DESC_DTYPE_CONTEXT:
 		name = "Tx_context_desc";
 		break;
+	case IAVF_TX_DESC_DTYPE_IPSEC:
+		name = "Tx_IPsec_desc";
+		break;
 	default:
 		name = "unknown_desc";
 		break;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index df15e589d4..145b059837 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1776,3 +1776,32 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 
 	return 0;
 }
+
+
+
+int
+iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
+	args.in_args = msg;
+	args.in_args_size = msg_len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 1);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+				"OP_INLINE_IPSEC_CRYPTO");
+		return err;
+	}
+
+	memcpy(resp_msg, args.out_buffer, resp_msg_len);
+
+	return 0;
+}
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 36a82e3faa..5eb230f687 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -5,7 +5,7 @@
 cflags += ['-Wno-strict-aliasing']
 
 includes += include_directories('../../common/iavf')
-deps += ['common_iavf']
+deps += ['common_iavf', 'security', 'cryptodev']
 
 sources = files(
         'iavf_ethdev.c',
@@ -15,6 +15,7 @@ sources = files(
         'iavf_fdir.c',
         'iavf_hash.c',
         'iavf_tm.c',
+        'iavf_ipsec_crypto.c',
 )
 
 if arch_subdir == 'x86'
diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
index 3a045040f1..7426eb9be3 100644
--- a/drivers/net/iavf/rte_pmd_iavf.h
+++ b/drivers/net/iavf/rte_pmd_iavf.h
@@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 /**
  * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map
index f3efe756cf..97f0f87311 100644
--- a/drivers/net/iavf/version.map
+++ b/drivers/net/iavf/version.map
@@ -13,4 +13,7 @@ EXPERIMENTAL {
 	rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+	# added in 21.11
+	rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v13 5/7] net/iavf: add xstats support for inline IPsec crypto
  2021-10-28 15:52 ` [dpdk-dev] [PATCH v13 " Radu Nicolau
                     ` (3 preceding siblings ...)
  2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-28 15:52   ` Radu Nicolau
  2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
  2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 15:52 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add per-queue counters for maintaining statistics for inline IPsec
crypto offload. These can be retrieved through
rte_security_session_stats_get(), with more detailed error counters
exposed through the rte_ethdev xstats.
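
As a rough illustration (not part of this patch), an application could
read the new counters through the generic xstats API. The helper below
is hypothetical, but the "inline_ipsec_crypto_" name prefix matches the
entries added to rte_iavf_stats_strings:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: print the inline IPsec counters of one port. */
static void
print_inline_ipsec_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *xstats;
	int nb, i;

	nb = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (nb <= 0)
		return;

	names = calloc(nb, sizeof(*names));
	xstats = calloc(nb, sizeof(*xstats));
	if (names == NULL || xstats == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, nb) != nb ||
	    rte_eth_xstats_get(port_id, xstats, nb) != nb)
		goto out;

	for (i = 0; i < nb; i++) {
		/* counters added by this patch share a common prefix */
		if (strncmp(names[i].name, "inline_ipsec_crypto_", 20) == 0)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, xstats[i].value);
	}
out:
	free(names);
	free(xstats);
}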

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h              | 21 ++++++-
 drivers/net/iavf/iavf_ethdev.c       | 84 +++++++++++++++++++++++-----
 drivers/net/iavf/iavf_ipsec_crypto.c |  2 +-
 drivers/net/iavf/iavf_rxtx.h         | 12 ----
 4 files changed, 90 insertions(+), 29 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index bac72590bc..53c99d0f0e 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -96,6 +96,25 @@ struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
 
+
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_eth_xstats {
+	struct virtchnl_eth_stats eth_stats;
+	struct iavf_ipsec_crypto_stats ips_stats;
+};
+
 /* Structure that defines a VSI, associated with a adapter. */
 struct iavf_vsi {
 	struct iavf_adapter *adapter; /* Backreference to associated adapter */
@@ -105,7 +124,7 @@ struct iavf_vsi {
 	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
 	uint16_t base_vector;
 	uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
-	struct virtchnl_eth_stats eth_stats_offset;
+	struct iavf_eth_xstats eth_stats_offset;
 };
 
 struct rte_flow;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index dba505494f..783a10060c 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -90,6 +90,7 @@ static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);
 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n);
 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -145,21 +146,37 @@ struct rte_iavf_xstats_name_off {
 	unsigned int offset;
 };
 
+#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
-	{"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
-	{"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
-	{"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
-	{"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
-	{"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
+	{"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
+	{"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
+	{"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
+	{"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
+	{"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
 	{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
 		rx_unknown_protocol)},
-	{"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
-	{"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
-	{"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
-	{"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
-	{"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
-	{"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
+	{"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
+	{"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
+	{"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
+	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
+	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
+	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+
+	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
+	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
+	{"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
+	{"inline_ipsec_crypto_ierrors_sad_lookup",
+			_OFF_OF(ips_stats.ierrors.sad_miss)},
+	{"inline_ipsec_crypto_ierrors_not_processed",
+			_OFF_OF(ips_stats.ierrors.not_processed)},
+	{"inline_ipsec_crypto_ierrors_icv_fail",
+			_OFF_OF(ips_stats.ierrors.icv_check)},
+	{"inline_ipsec_crypto_ierrors_length",
+			_OFF_OF(ips_stats.ierrors.ipsec_length)},
+	{"inline_ipsec_crypto_ierrors_misc",
+			_OFF_OF(ips_stats.ierrors.misc)},
 };
+#undef _OFF_OF
 
 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
 		sizeof(rte_iavf_stats_strings[0]))
@@ -177,7 +194,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.stats_reset                = iavf_dev_stats_reset,
 	.xstats_get                 = iavf_dev_xstats_get,
 	.xstats_get_names           = iavf_dev_xstats_get_names,
-	.xstats_reset               = iavf_dev_stats_reset,
+	.xstats_reset               = iavf_dev_xstats_reset,
 	.promiscuous_enable         = iavf_dev_promiscuous_enable,
 	.promiscuous_disable        = iavf_dev_promiscuous_disable,
 	.allmulticast_enable        = iavf_dev_allmulticast_enable,
@@ -1527,7 +1544,7 @@ iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
 static void
 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
 {
-	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
 
 	iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
 	iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
@@ -1589,7 +1606,18 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev)
 		return ret;
 
 	/* set stats offset base on current values */
-	vsi->eth_stats_offset = *pstats;
+	vsi->eth_stats_offset.eth_stats = *pstats;
+
+	return 0;
+}
+
+static int
+iavf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_dev_stats_reset(dev);
+	memset(&vf->vsi.eth_stats_offset, 0, sizeof(struct iavf_eth_xstats));
 
 	return 0;
 }
@@ -1609,6 +1637,27 @@ static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return IAVF_NB_XSTATS;
 }
 
+static void
+iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
+		struct iavf_ipsec_crypto_stats *ips)
+{
+	uint16_t idx;
+	for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
+		struct iavf_rx_queue *rxq;
+		struct iavf_ipsec_crypto_stats *stats;
+		rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
+		stats = &rxq->stats.ipsec_crypto;
+		ips->icount += stats->icount;
+		ips->ibytes += stats->ibytes;
+		ips->ierrors.count += stats->ierrors.count;
+		ips->ierrors.sad_miss += stats->ierrors.sad_miss;
+		ips->ierrors.not_processed += stats->ierrors.not_processed;
+		ips->ierrors.icv_check += stats->ierrors.icv_check;
+		ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
+		ips->ierrors.misc += stats->ierrors.misc;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1619,6 +1668,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_vsi *vsi = &vf->vsi;
 	struct virtchnl_eth_stats *pstats = NULL;
+	struct iavf_eth_xstats iavf_xtats = {0};
 
 	if (n < IAVF_NB_XSTATS)
 		return IAVF_NB_XSTATS;
@@ -1631,11 +1681,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 		return 0;
 
 	iavf_update_stats(vsi, pstats);
+	iavf_xtats.eth_stats = *pstats;
+
+	if (iavf_ipsec_crypto_supported(adapter))
+		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
-		xstats[i].value = *(uint64_t *)(((char *)pstats) +
+		xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
 			rte_iavf_stats_strings[i].offset);
 	}
 
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
index 633fedf860..eabc4be6bc 100644
--- a/drivers/net/iavf/iavf_ipsec_crypto.c
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -509,7 +509,7 @@ iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
 			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
 	} else {
 		uint32_t *v6_dst_addr =
-			conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
+			(uint32_t *)conf->ipsec.tunnel.ipv6.dst_addr.s6_addr;
 
 		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index b88c81f8f6..c7156d1daa 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -165,18 +165,6 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
-struct iavf_ipsec_crypto_stats {
-	uint64_t icount;
-	uint64_t ibytes;
-	struct {
-		uint64_t count;
-		uint64_t sad_miss;
-		uint64_t not_processed;
-		uint64_t icv_check;
-		uint64_t ipsec_length;
-		uint64_t misc;
-	} ierrors;
-};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v13 6/7] net/iavf: add watchdog for VFLR
  2021-10-28 15:52 ` [dpdk-dev] [PATCH v13 " Radu Nicolau
                     ` (4 preceding siblings ...)
  2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-10-28 15:52   ` Radu Nicolau
  2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 15:52 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add a watchdog to the iAVF PMD which supports monitoring the VFLR
register. If the device is not already in reset and a VF reset in
progress is detected, notify the user through a callback and move the
device into the reset state. If the device is already in reset, poll
for completion of the reset.

The watchdog is disabled by default; to enable it, set
IAVF_DEV_WATCHDOG_PERIOD to a non-zero value (in microseconds).
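
As a rough sketch (not part of this patch), an application that wants
to react to the VFLR event reported by the watchdog can register for
RTE_ETH_EVENT_INTR_RESET; the callback and helper names below are
placeholders:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* Hypothetical callback invoked when the watchdog signals a VF reset;
 * a real application would start its port recovery path here.
 */
static int
vf_reset_event_cb(uint16_t port_id, enum rte_eth_event_type type,
		  void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (type == RTE_ETH_EVENT_INTR_RESET)
		printf("port %u: VF reset detected by PMD watchdog\n",
		       port_id);

	return 0;
}

static void
register_vf_reset_notification(uint16_t port_id)
{
	/* typically done once, after rte_eth_dev_configure() */
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
				      vf_reset_event_cb, NULL);
}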

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h        |  5 ++
 drivers/net/iavf/iavf_ethdev.c | 94 ++++++++++++++++++++++++++++++++++
 2 files changed, 99 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 53c99d0f0e..614cf7d070 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -31,6 +31,8 @@
 
 #define IAVF_NUM_MACADDR_MAX      64
 
+#define IAVF_DEV_WATCHDOG_PERIOD     0
+
 #define IAVF_DEFAULT_RX_PTHRESH      8
 #define IAVF_DEFAULT_RX_HTHRESH      8
 #define IAVF_DEFAULT_RX_WTHRESH      0
@@ -216,6 +218,9 @@ struct iavf_info {
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
+	/** iAVF watchdog enable */
+	bool watchdog_enabled;
+
 	/* Event from pf */
 	bool dev_closed;
 	bool link_up;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 783a10060c..ae0f8f17f4 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -25,6 +25,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_alarm.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
@@ -240,6 +241,91 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+__rte_unused
+static int
+iavf_vfr_inprogress(struct iavf_hw *hw)
+{
+	int inprogress = 0;
+
+	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
+		VIRTCHNL_VFR_INPROGRESS)
+		inprogress = 1;
+
+	if (inprogress)
+		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
+
+	return inprogress;
+}
+
+__rte_unused
+static void
+iavf_dev_watchdog(void *cb_arg)
+{
+	struct iavf_adapter *adapter = cb_arg;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	int vfr_inprogress = 0, rc = 0;
+
+	/* check if watchdog has been disabled since last call */
+	if (!adapter->vf.watchdog_enabled)
+		return;
+
+	/* If in reset then poll vfr_inprogress register for completion */
+	if (adapter->vf.vf_reset) {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (!vfr_inprogress) {
+			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+				adapter->vf.eth_dev->data->name);
+			adapter->vf.vf_reset = false;
+		}
+	/* If not in reset then poll vfr_inprogress register for VFLR event */
+	} else {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (vfr_inprogress) {
+			PMD_DRV_LOG(INFO,
+				"VF \"%s\" reset event detected by watchdog",
+				adapter->vf.eth_dev->data->name);
+
+			/* enter reset state with VFLR event */
+			adapter->vf.vf_reset = true;
+
+			rte_eth_dev_callback_process(adapter->vf.eth_dev,
+				RTE_ETH_EVENT_INTR_RESET, NULL);
+		}
+	}
+
+	/* re-alarm watchdog */
+	rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, cb_arg);
+
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed to reset device watchdog alarm for \"%s\"",
+			adapter->vf.eth_dev->data->name);
+}
+
+static void
+iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Enabling device watchdog");
+	adapter->vf.watchdog_enabled = true;
+	if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, (void *)adapter))
+		PMD_DRV_LOG(ERR, "Failed to enable device watchdog");
+#endif
+}
+
+static void
+iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Disabling device watchdog");
+	adapter->vf.watchdog_enabled = false;
+#endif
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
@@ -2466,6 +2552,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	iavf_default_rss_disable(adapter);
 
+
+	/* Start device watchdog */
+	iavf_dev_watchdog_enable(adapter);
+
+
 	return 0;
 
 flow_init_err:
@@ -2549,6 +2640,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
 		vf->vf_reset = false;
 
+	/* disable watchdog */
+	iavf_dev_watchdog_disable(adapter);
+
 	return ret;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v13 7/7] net/iavf: update doc with inline crypto support
  2021-10-28 15:52 ` [dpdk-dev] [PATCH v13 " Radu Nicolau
                     ` (5 preceding siblings ...)
  2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
@ 2021-10-28 15:52   ` Radu Nicolau
  6 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 15:52 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Haiyue Wang
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Update the PMD doc, feature matrix and release notes with the
new inline crypto feature.

Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 doc/guides/nics/features/iavf.ini      |  2 ++
 doc/guides/nics/intel_vf.rst           | 10 ++++++++++
 doc/guides/rel_notes/release_21_11.rst |  1 +
 3 files changed, 13 insertions(+)

diff --git a/doc/guides/nics/features/iavf.ini b/doc/guides/nics/features/iavf.ini
index dd3519e1e2..01f514239e 100644
--- a/doc/guides/nics/features/iavf.ini
+++ b/doc/guides/nics/features/iavf.ini
@@ -27,6 +27,7 @@ L4 checksum offload  = P
 Packet type parsing  = Y
 Rx descriptor status = Y
 Tx descriptor status = Y
+Inline crypto        = Y
 Basic stats          = Y
 Multiprocess aware   = Y
 FreeBSD              = Y
@@ -65,3 +66,4 @@ mark                 = Y
 passthru             = Y
 queue                = Y
 rss                  = Y
+security             = Y
diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index a1e236ad75..fd235e1463 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -633,3 +633,13 @@ Windows Support
 
 *   To load NetUIO driver, follow the steps mentioned in `dpdk-kmods repository
     <https://git.dpdk.org/dpdk-kmods/tree/windows/netuio/README.rst>`_.
+
+
+Inline IPsec Support
+--------------------
+
+*   The IAVF PMD supports inline crypto processing, depending on the underlying
+    hardware crypto capabilities. The IPsec Security Gateway sample application
+    supports inline IPsec processing for the IAVF PMD. For more details see the
+    IPsec Security Gateway Sample Application and the Security library
+    documentation.
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 1ccac87b73..9c13ceed1c 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -163,6 +163,7 @@ New Features
   * Added Intel iavf support on Windows.
   * Added IPv4 and L4 (TCP/UDP/SCTP) checksum hash support in RSS flow.
   * Added PPPoL2TPv2oUDP RSS hash based on inner IP address and TCP/UDP port.
+  * Added Intel iavf inline crypto support.
 
 * **Updated Intel ice driver.**
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec inline crypto support
  2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
                   ` (15 preceding siblings ...)
  2021-10-28 15:52 ` [dpdk-dev] [PATCH v13 " Radu Nicolau
@ 2021-10-28 16:04 ` Radu Nicolau
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 1/7] common/iavf: " Radu Nicolau
                     ` (7 more replies)
  16 siblings, 8 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 16:04 UTC (permalink / raw)
  Cc: dev, declan.doherty, abhijit.sinha, jingjing.wu, qi.z.zhang,
	beilei.xing, bruce.richardson, konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as the offload for ESP over UDP,
in conjunction with TSO for UDP and TCP flows.
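
As a rough usage sketch (not part of this series), an outbound inline
ESP SA would be set up through the rte_security API on the port's
security context, much as the ipsec-secgw sample does; the key, SPI and
helper name below are placeholders:

#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_ethdev.h>
#include <rte_security.h>

static uint8_t sa_key[16] = { 0 };	/* placeholder AES-128-GCM key */

static struct rte_crypto_sym_xform aead_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.key = { .data = sa_key, .length = sizeof(sa_key) },
		.iv = { .offset = 0, .length = 8 },	/* 8B IV per PMD caps */
		.digest_length = 16,
	},
};

/* Hypothetical helper: build the session configuration for an outbound
 * ESP tunnel SA that the NIC processes inline.
 */
static void
create_inline_esp_sa(uint16_t port_id)
{
	void *sec_ctx = rte_eth_dev_get_sec_ctx(port_id);
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1000,	/* placeholder SPI */
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			/* tunnel endpoint addresses omitted for brevity */
		},
		.crypto_xform = &aead_xform,
	};

	/* The session is then created with rte_security_session_create();
	 * its mempool arguments differ between DPDK releases, so the call
	 * is not spelled out here.
	 */
	RTE_SET_USED(sec_ctx);
	RTE_SET_USED(conf);
}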

Radu Nicolau (7):
  common/iavf: add iAVF IPsec inline crypto support
  net/iavf: rework Tx path
  net/iavf: add support for asynchronous virt channel messages
  net/iavf: add iAVF IPsec inline crypto support
  net/iavf: add xstats support for inline IPsec crypto
  net/iavf: add watchdog for VFLR
  net/iavf: update doc with inline crypto support

 doc/guides/nics/features/iavf.ini             |    2 +
 doc/guides/nics/intel_vf.rst                  |   10 +
 doc/guides/rel_notes/release_21_11.rst        |    1 +
 drivers/common/iavf/iavf_type.h               |    1 +
 drivers/common/iavf/virtchnl.h                |   17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
 drivers/net/iavf/iavf.h                       |   61 +-
 drivers/net/iavf/iavf_ethdev.c                |  219 +-
 drivers/net/iavf/iavf_generic_flow.c          |   15 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1894 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  716 +++++--
 drivers/net/iavf/iavf_rxtx.h                  |  212 +-
 drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
 drivers/net/iavf/iavf_vchnl.c                 |  169 +-
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 20 files changed, 4113 insertions(+), 319 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h

-- 
v2: small updates and fixes in the flow related section
v3: split the huge patch and address feedback
v4: small changes due to dependencies changes
v5: updated the watchdog patch
v6: rebased and updated the common section
v7: fixed TSO issue and disabled watchdog by default
v8: rebased to next-net-intel and added doc updates
v9: fixed IV len for AEAD and GMAC
v10: removed blank lines at EOF
v11: rebased patchset
v12: rebased patchset to RC1
v13: fixed coding style issues

2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v13 1/7] common/iavf: add iAVF IPsec inline crypto support
  2021-10-28 16:04 ` [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec " Radu Nicolau
@ 2021-10-28 16:04   ` Radu Nicolau
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 2/7] net/iavf: rework Tx path Radu Nicolau
                     ` (6 subsequent siblings)
  7 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 16:04 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec.
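
As an illustration of how the new definitions fit together, here is a hedged
sketch of building the SA-create request that a VF-side caller would hand to
the virtchnl transport. The buffer handling, the helper name and the u8/u16
typedefs (provided by the common iavf headers) are assumptions; only the
structures and opcodes come from this patch.

#include <string.h>
#include "virtchnl.h"	/* pulls in virtchnl_inline_ipsec.h below */

/* Returns the message length on success, 0 if the buffer is too small. */
static u16
build_sa_create_msg(u8 *buf, u16 buf_sz,
		    const struct virtchnl_ipsec_sa_cfg *sa)
{
	struct inline_ipsec_msg *msg = (struct inline_ipsec_msg *)buf;
	u16 len = virtchnl_inline_ipsec_val_msg_len(INLINE_IPSEC_OP_SA_CREATE);

	if (buf_sz < len)
		return 0;

	memset(buf, 0, len);
	msg->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
	msg->req_id = 0;	/* assumption: caller allocates request ids */
	memcpy(msg->ipsec_data.sa_cfg, sa, sizeof(*sa));

	/* the buffer is then sent with opcode VIRTCHNL_OP_INLINE_IPSEC_CRYPTO
	 * and validated on the other side by virtchnl_vc_validate_vf_msg() */
	return len;
}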

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 drivers/common/iavf/iavf_type.h             |   1 +
 drivers/common/iavf/virtchnl.h              |  17 +-
 drivers/common/iavf/virtchnl_inline_ipsec.h | 553 ++++++++++++++++++++
 3 files changed, 569 insertions(+), 2 deletions(-)
 create mode 100644 drivers/common/iavf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index 73dfb47e70..51267ca3b3 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -723,6 +723,7 @@ enum iavf_tx_desc_dtype_value {
 	IAVF_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
 	IAVF_TX_DESC_DTYPE_CONTEXT	= 0x1,
 	IAVF_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	IAVF_TX_DESC_DTYPE_IPSEC	= 0x3,
 	IAVF_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
 	IAVF_TX_DESC_DTYPE_DDP_CTX	= 0x9,
 	IAVF_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 067f715945..269578f7c0 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -38,6 +38,8 @@
  * value in current and future projects
  */
 
+#include "virtchnl_inline_ipsec.h"
+
 /* Error Codes */
 enum virtchnl_status_code {
 	VIRTCHNL_STATUS_SUCCESS				= 0,
@@ -133,7 +135,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
-	/* opcodes 34, 35, 36, and 37 are reserved */
+	VIRTCHNL_OP_INLINE_IPSEC_CRYPTO = 34,
+	/* opcodes 35 and 36 are reserved */
 	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
 	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
@@ -225,6 +228,8 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
 		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+		return "VIRTCHNL_OP_INLINE_IPSEC_CRYPTO";
 	case VIRTCHNL_OP_DCF_CMD_DESC:
 		return "VIRTCHNL_OP_DCF_CMD_DESC";
 	case VIRTCHNL_OP_DCF_CMD_BUFF:
@@ -385,7 +390,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
 /* used to negotiate communicating link speeds in Mbps */
 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
-	/* BIT(8) is reserved */
+#define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
@@ -2291,6 +2296,14 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 				      sizeof(struct virtchnl_queue_vector);
 		}
 		break;
+
+	case VIRTCHNL_OP_INLINE_IPSEC_CRYPTO:
+	{
+		struct inline_ipsec_msg *iim = (struct inline_ipsec_msg *)msg;
+		valid_len =
+			virtchnl_inline_ipsec_val_msg_len(iim->ipsec_opcode);
+		break;
+	}
 	/* These are always errors coming from the VF. */
 	case VIRTCHNL_OP_EVENT:
 	case VIRTCHNL_OP_UNKNOWN:
diff --git a/drivers/common/iavf/virtchnl_inline_ipsec.h b/drivers/common/iavf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..1e9134501e
--- /dev/null
+++ b/drivers/common/iavf/virtchnl_inline_ipsec.h
@@ -0,0 +1,553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2021 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+/* Not all valid, if certain field is invalid, set 1 for all bits */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+} __rte_packed;
+
+/* vf record the capability of crypto from the virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF pass virtchnl_ipsec_cap to PF
+ * and PF return capability of ipsec from virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+} __rte_packed;
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+} __rte_packed;
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF sends this SA configuration to PF using virtchnl;
+ * PF creates the SA as configured and the PF driver will return
+ * a unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logically separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF sends the SA index and new configuration to PF;
+ * PF will update the SA according to this configuration.
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF sends the SA index configuration to PF;
+ * PF will destroy the SA according to this configuration.
+ * The flag bitmap indicates whether all SAs or only the
+ * selected SAs will be destroyed.
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* All zero bitmap indicates all SA will be destroyed.
+	 * Non-zero bitmap indicates the selected SA in
+	 * array sa_index will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+} __rte_packed;
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF sends this SA configuration to PF using virtchnl;
+ * PF reads the SA and will return the configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logically separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+} __rte_packed;
+
+
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4	(0)
+#define VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6	(1)
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+} __rte_packed;
+
+
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+} __rte_packed;
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+} __rte_packed;
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v13 2/7] net/iavf: rework Tx path
  2021-10-28 16:04 ` [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec " Radu Nicolau
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 1/7] common/iavf: " Radu Nicolau
@ 2021-10-28 16:04   ` Radu Nicolau
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
                     ` (5 subsequent siblings)
  7 siblings, 0 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 16:04 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Bruce Richardson, Konstantin Ananyev
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, Radu Nicolau

Rework the Tx path and Tx descriptor usage in order to
allow for better use of offload flags and to facilitate enabling of
the inline crypto offload feature.
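
For context, a small sketch (not part of this patch) of the mbuf offload
state that drives the new descriptor layout: with the flags below the
reworked iavf_xmit_pkts() emits one context descriptor carrying the TSO
parameters plus one data descriptor per mbuf segment. The header sizes and
MSS are example values only.

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>

static void
request_ipv4_tcp_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->l2_len = RTE_ETHER_HDR_LEN;
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->l4_len = sizeof(struct rte_tcp_hdr);
	m->tso_segsz = mss;			/* e.g. 1448 */
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 |
		       RTE_MBUF_F_TX_IP_CKSUM |
		       RTE_MBUF_F_TX_TCP_SEG;	/* TSO implies L4 checksum */
}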

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c         | 544 ++++++++++++++++-----------
 drivers/net/iavf/iavf_rxtx.h         | 117 +++++-
 drivers/net/iavf/iavf_rxtx_vec_sse.c |  10 +-
 3 files changed, 437 insertions(+), 234 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 52d919ca1b..9663e6514c 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1054,27 +1054,34 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 
 static inline void
 iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
-			  volatile union iavf_rx_flex_desc *rxdp,
-			  uint8_t rx_flags)
+			  volatile union iavf_rx_flex_desc *rxdp)
 {
-	uint16_t vlan_tci = 0;
-
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 &&
-	    rte_le_to_cpu_64(rxdp->wb.status_error0) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+		(1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+		mb->ol_flags |= RTE_MBUF_F_RX_VLAN |
+				RTE_MBUF_F_RX_VLAN_STRIPPED;
+		mb->vlan_tci =
+			rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	} else {
+		mb->vlan_tci = 0;
+	}
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 &&
-	    rte_le_to_cpu_16(rxdp->wb.status_error1) &
-	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S))
-		vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
-#endif
-
-	if (vlan_tci) {
-		mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
-		mb->vlan_tci = vlan_tci;
+	if (rte_le_to_cpu_16(rxdp->wb.status_error1) &
+	    (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) {
+		mb->ol_flags |= RTE_MBUF_F_RX_QINQ_STRIPPED |
+				RTE_MBUF_F_RX_QINQ |
+				RTE_MBUF_F_RX_VLAN_STRIPPED |
+				RTE_MBUF_F_RX_VLAN;
+		mb->vlan_tci_outer = mb->vlan_tci;
+		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd);
+		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_1st),
+			   rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd));
+	} else {
+		mb->vlan_tci_outer = 0;
 	}
+#endif
 }
 
 /* Translate the rx descriptor status and error fields to pkt flags */
@@ -1394,7 +1401,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->ol_flags = 0;
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1536,7 +1543,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = 0;
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
-		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags);
+		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1774,7 +1781,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
-			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags);
+			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2068,190 +2075,305 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 	return 0;
 }
 
-/* Check if the context descriptor is needed for TX offloading */
+
+
+static inline void
+iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
+{
+	uint64_t cmd = 0;
+
+	/* TSO enabled */
+	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
+		cmd = IAVF_TX_CTX_DESC_TSO << IAVF_TXD_DATA_QW1_CMD_SHIFT;
+
+	/* Time Sync - Currently not supported */
+
+	/* Outer L2 TAG 2 Insertion - Currently not supported */
+	/* Inner L2 TAG 2 Insertion - Currently not supported */
+
+	*field |= cmd;
+}
+
+static inline void
+iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
+		const struct rte_mbuf *m)
+{
+	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
+	uint64_t eip_len = 0;
+	uint64_t eip_noinc = 0;
+	/* Default - IP_ID is increment in each segment of LSO */
+
+	switch (m->ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 |
+			RTE_MBUF_F_TX_OUTER_IPV6 |
+			RTE_MBUF_F_TX_OUTER_IP_CKSUM)) {
+	case RTE_MBUF_F_TX_OUTER_IPV4:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	case RTE_MBUF_F_TX_OUTER_IPV6:
+		eip_typ = IAVF_TX_CTX_DESC_EIPT_IPV6;
+		eip_len = m->outer_l3_len >> 2;
+	break;
+	}
+
+	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
+		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
+		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
+}
+
 static inline uint16_t
-iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
+iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
+	struct rte_mbuf *m)
 {
-	if (flags & RTE_MBUF_F_TX_TCP_SEG)
-		return 1;
-	if (flags & RTE_MBUF_F_TX_VLAN &&
-	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		return 1;
-	return 0;
+	uint64_t segmentation_field = 0;
+	uint64_t total_length = 0;
+
+	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+		total_length -= m->outer_l3_len;
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX
+	if (!m->l4_len || !m->tso_segsz)
+		PMD_TX_LOG(DEBUG, "L4 length %d, LSO Segment size %d",
+			 m->l4_len, m->tso_segsz);
+	if (m->tso_segsz < 88)
+		PMD_TX_LOG(DEBUG, "LSO Segment size %d is less than minimum %d",
+			m->tso_segsz, 88);
+#endif
+	segmentation_field =
+		(((uint64_t)total_length << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) &
+				IAVF_TXD_CTX_QW1_TSO_LEN_MASK) |
+		(((uint64_t)m->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT) &
+				IAVF_TXD_CTX_QW1_MSS_MASK);
+
+	*field |= segmentation_field;
+
+	return total_length;
+}
+
+
+struct iavf_tx_context_desc_qws {
+	__le64 qw0;
+	__le64 qw1;
+};
+
+static inline void
+iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
+	struct rte_mbuf *m, uint16_t *tlen)
+{
+	volatile struct iavf_tx_context_desc_qws *desc_qws =
+			(volatile struct iavf_tx_context_desc_qws *)desc;
+	/* fill descriptor type field */
+	desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
+
+	/* fill command field */
+	iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m);
+
+	/* fill segmentation field */
+	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
+		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
+				m);
+	}
+
+	/* fill tunnelling field */
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
+	else
+		desc_qws->qw0 = 0;
+
+	desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
+	desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);
 }
 
+
 static inline void
-iavf_txd_enable_checksum(uint64_t ol_flags,
-			uint32_t *td_cmd,
-			uint32_t *td_offset,
-			union iavf_tx_offload tx_offload)
+iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
+		struct rte_mbuf *m)
 {
+	uint64_t command = 0;
+	uint64_t offset = 0;
+	uint64_t l2tag1 = 0;
+
+	*qw1 = IAVF_TX_DESC_DTYPE_DATA;
+
+	command = (uint64_t)IAVF_TX_DESC_CMD_ICRC;
+
+	/* Descriptor based VLAN insertion */
+	if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
+		command |= (uint64_t)IAVF_TX_DESC_CMD_IL2TAG1;
+		l2tag1 |= m->vlan_tci;
+	}
+
 	/* Set MACLEN */
-	*td_offset |= (tx_offload.l2_len >> 1) <<
-		      IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-	/* Enable L3 checksum offloads */
-	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
-		*td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
-		*td_offset |= (tx_offload.l3_len >> 2) <<
-			      IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
-	}
-
-	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (tx_offload.l4_len >> 2) <<
+	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L3 checksum offloading inner */
+	if (m->ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_IPV4)) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
+		command |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+		offset |= (m->l3_len >> 2) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	}
+
+	if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (m->l4_len >> 2) <<
 			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		return;
 	}
 
 	/* Enable L4 checksum offloads */
-	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+	switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
 	case RTE_MBUF_F_TX_TCP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
-		*td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case RTE_MBUF_F_TX_SCTP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
-		*td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	case RTE_MBUF_F_TX_UDP_CKSUM:
-		*td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
-		*td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			      IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
-		break;
-	default:
+		command |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
 		break;
 	}
+
+	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
+		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
+		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
+		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
+		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
-/* set TSO context descriptor
- * support IP -> L4 and IP -> IP -> L4
- */
-static inline uint64_t
-iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
+static inline void
+iavf_fill_data_desc_buffer_sz_field(volatile uint64_t *field,  uint16_t value)
 {
-	uint64_t ctx_desc = 0;
-	uint32_t cd_cmd, hdr_len, cd_tso_len;
-
-	if (!tx_offload.l4_len) {
-		PMD_TX_LOG(DEBUG, "L4 length set to 0");
-		return ctx_desc;
+	*field |= (((uint64_t)value << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 	}
 
-	hdr_len = tx_offload.l2_len +
-		  tx_offload.l3_len +
-		  tx_offload.l4_len;
+static inline void
+iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
+	struct rte_mbuf *m, uint64_t desc_template,
+	uint16_t tlen, uint16_t ipseclen)
+{
+	uint32_t hdrlen = m->l2_len;
+	uint32_t bufsz = 0;
+
+	/* fill data descriptor qw1 from template */
+	desc->cmd_type_offset_bsz = desc_template;
+
+	/* set data buffer address */
+	desc->buffer_addr = rte_mbuf_data_iova(m);
+
+	/* calculate data buffer size less set header lengths */
+	if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
+			(m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
+					RTE_MBUF_F_TX_UDP_SEG))) {
+		hdrlen += m->outer_l3_len;
+		if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
+			hdrlen += m->l3_len + m->l4_len;
+		else
+			hdrlen += m->l3_len;
+		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
+			hdrlen += ipseclen;
+		bufsz = hdrlen + tlen;
+	} else {
+		bufsz = m->data_len;
+	}
 
-	cd_cmd = IAVF_TX_CTX_DESC_TSO;
-	cd_tso_len = mbuf->pkt_len - hdr_len;
-	ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
-		     ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-		     ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
+	/* set data buffer size */
+	desc->cmd_type_offset_bsz |=
+		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 
-	return ctx_desc;
+	desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
+	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
 }
 
-/* Construct the tx flags */
-static inline uint64_t
-iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
-	       uint32_t td_tag)
-{
-	return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
-				((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
-				((uint64_t)td_offset <<
-				 IAVF_TXD_QW1_OFFSET_SHIFT) |
-				((uint64_t)size  <<
-				 IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
-				((uint64_t)td_tag  <<
-				 IAVF_TXD_QW1_L2TAG1_SHIFT));
-}
 
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-	volatile struct iavf_tx_desc *txd;
-	volatile struct iavf_tx_desc *txr;
-	struct iavf_tx_queue *txq;
-	struct iavf_tx_entry *sw_ring;
+	struct iavf_tx_queue *txq = tx_queue;
+	volatile struct iavf_tx_desc *txr = txq->tx_ring;
+	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
-	struct rte_mbuf *tx_pkt;
-	struct rte_mbuf *m_seg;
-	uint16_t tx_id;
-	uint16_t nb_tx;
-	uint32_t td_cmd;
-	uint32_t td_offset;
-	uint32_t td_tag;
-	uint64_t ol_flags;
-	uint16_t nb_used;
-	uint16_t nb_ctx;
-	uint16_t tx_last;
-	uint16_t slen;
-	uint64_t buf_dma_addr;
-	uint16_t cd_l2tag2 = 0;
-	union iavf_tx_offload tx_offload = {0};
-
-	txq = tx_queue;
-	sw_ring = txq->sw_ring;
-	txr = txq->tx_ring;
-	tx_id = txq->tx_tail;
-	txe = &sw_ring[tx_id];
+	struct rte_mbuf *mb, *mb_seg;
+	uint16_t desc_idx, desc_idx_last;
+	uint16_t idx;
+
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_free < txq->free_thresh)
-		(void)iavf_xmit_cleanup(txq);
+		iavf_xmit_cleanup(txq);
+
+	desc_idx = txq->tx_tail;
+	txe = &txe_ring[desc_idx];
+
+#ifdef RTE_LIBRTE_IAVF_DEBUG_TX_DESC_RING
+		iavf_dump_tx_entry_ring(txq);
+		iavf_dump_tx_desc_ring(txq);
+#endif
+
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		volatile struct iavf_tx_desc *ddesc;
+		uint16_t nb_desc_ctx;
+		uint16_t nb_desc_data, nb_desc_required;
+		uint16_t tlen = 0, ipseclen = 0;
+		uint64_t ddesc_template = 0;
+		uint64_t ddesc_cmd = 0;
 
-	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-		td_cmd = 0;
-		td_tag = 0;
-		td_offset = 0;
+		mb = tx_pkts[idx];
 
-		tx_pkt = *tx_pkts++;
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
-		ol_flags = tx_pkt->ol_flags;
-		tx_offload.l2_len = tx_pkt->l2_len;
-		tx_offload.l3_len = tx_pkt->l3_len;
-		tx_offload.l4_len = tx_pkt->l4_len;
-		tx_offload.tso_segsz = tx_pkt->tso_segsz;
-		/* Calculate the number of context descriptors needed. */
-		nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag);
+		nb_desc_data = mb->nb_segs;
+		nb_desc_ctx = !!(mb->ol_flags &
+			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
+					RTE_MBUF_F_TX_TUNNEL_MASK));
 
-		/* The number of descriptors that must be allocated for
+		/**
+		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
-		 * packet plus 1 context descriptor if needed.
+		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
-		tx_last = (uint16_t)(tx_id + nb_used - 1);
+		nb_desc_required = nb_desc_data + nb_desc_ctx;
 
-		/* Circular ring */
-		if (tx_last >= txq->nb_tx_desc)
-			tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
+		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
-		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u"
-			   " tx_first=%u tx_last=%u",
-			   txq->port_id, txq->queue_id, tx_id, tx_last);
+		/* wrap descriptor ring */
+		if (desc_idx_last >= txq->nb_tx_desc)
+			desc_idx_last =
+				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
 
-		if (nb_used > txq->nb_free) {
+		PMD_TX_LOG(DEBUG,
+			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
+			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
+
+		if (nb_desc_required > txq->nb_free) {
 			if (iavf_xmit_cleanup(txq)) {
-				if (nb_tx == 0)
+				if (idx == 0)
 					return 0;
 				goto end_of_tx;
 			}
-			if (unlikely(nb_used > txq->rs_thresh)) {
-				while (nb_used > txq->nb_free) {
+			if (unlikely(nb_desc_required > txq->rs_thresh)) {
+				while (nb_desc_required > txq->nb_free) {
 					if (iavf_xmit_cleanup(txq)) {
-						if (nb_tx == 0)
+						if (idx == 0)
 							return 0;
 						goto end_of_tx;
 					}
@@ -2259,122 +2381,94 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 		}
 
-		/* Descriptor based VLAN insertion */
-		if (ol_flags & RTE_MBUF_F_TX_VLAN &&
-		    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
-			td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
-			td_tag = tx_pkt->vlan_tci;
-		}
-
-		/* According to datasheet, the bit2 is reserved and must be
-		 * set to 1.
-		 */
-		td_cmd |= 0x04;
-
-		/* Enable checksum offloading */
-		if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
-			iavf_txd_enable_checksum(ol_flags, &td_cmd,
-						&td_offset, tx_offload);
+		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb);
 
-		if (nb_ctx) {
 			/* Setup TX context descriptor if required */
-			uint64_t cd_type_cmd_tso_mss =
-				IAVF_TX_DESC_DTYPE_CONTEXT;
-			volatile struct iavf_tx_context_desc *ctx_txd =
+		if (nb_desc_ctx) {
+			volatile struct iavf_tx_context_desc *ctx_desc =
 				(volatile struct iavf_tx_context_desc *)
-							&txr[tx_id];
+					&txr[desc_idx];
 
 			/* clear QW0 or the previous writeback value
 			 * may impact next write
 			 */
-			*(volatile uint64_t *)ctx_txd = 0;
+			*(volatile uint64_t *)ctx_desc = 0;
 
-			txn = &sw_ring[txe->next_id];
+			txn = &txe_ring[txe->next_id];
 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
 			if (txe->mbuf) {
 				rte_pktmbuf_free_seg(txe->mbuf);
 				txe->mbuf = NULL;
 			}
 
-			/* TSO enabled */
-			if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
-				cd_type_cmd_tso_mss |=
-					iavf_set_tso_ctx(tx_pkt, tx_offload);
+			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
-			if (ol_flags & RTE_MBUF_F_TX_VLAN &&
-			    txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
-				cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2
-					<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-				cd_l2tag2 = tx_pkt->vlan_tci;
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
 			}
 
-			ctx_txd->type_cmd_tso_mss =
-				rte_cpu_to_le_64(cd_type_cmd_tso_mss);
-			ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
 
-			IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
-			txe = txn;
-		}
 
-		m_seg = tx_pkt;
+		mb_seg = mb;
+
 		do {
-			txd = &txr[tx_id];
-			txn = &sw_ring[txe->next_id];
+			ddesc = (volatile struct iavf_tx_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
 			if (txe->mbuf)
 				rte_pktmbuf_free_seg(txe->mbuf);
-			txe->mbuf = m_seg;
-
-			/* Setup TX Descriptor */
-			slen = m_seg->data_len;
-			buf_dma_addr = rte_mbuf_data_iova(m_seg);
-			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
-			txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
-								  td_offset,
-								  slen,
-								  td_tag);
-
-			IAVF_DUMP_TX_DESC(txq, txd, tx_id);
-			txe->last_id = tx_last;
-			tx_id = txe->next_id;
+
+			txe->mbuf = mb_seg;
+			iavf_fill_data_desc(ddesc, mb_seg,
+					ddesc_template, tlen, ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->next;
-		} while (m_seg);
+			mb_seg = mb_seg->next;
+		} while (mb_seg);
 
 		/* The last packet data descriptor needs End Of Packet (EOP) */
-		td_cmd |= IAVF_TX_DESC_CMD_EOP;
-		txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
-		txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
+		ddesc_cmd = IAVF_TX_DESC_CMD_EOP;
+
+		txq->nb_used = (uint16_t)(txq->nb_used + nb_desc_required);
+		txq->nb_free = (uint16_t)(txq->nb_free - nb_desc_required);
 
 		if (txq->nb_used >= txq->rs_thresh) {
 			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
 				   "%4u (port=%d queue=%d)",
-				   tx_last, txq->port_id, txq->queue_id);
+				   desc_idx_last, txq->port_id, txq->queue_id);
 
-			td_cmd |= IAVF_TX_DESC_CMD_RS;
+			ddesc_cmd |= IAVF_TX_DESC_CMD_RS;
 
 			/* Update txq RS bit counters */
 			txq->nb_used = 0;
 		}
 
-		txd->cmd_type_offset_bsz |=
-			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
-					 IAVF_TXD_QW1_CMD_SHIFT);
-		IAVF_DUMP_TX_DESC(txq, txd, tx_id);
+		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
+				IAVF_TXD_DATA_QW1_CMD_SHIFT);
+
+		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
 	}
 
 end_of_tx:
 	rte_wmb();
 
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   txq->port_id, txq->queue_id, tx_id, nb_tx);
+		   txq->port_id, txq->queue_id, desc_idx, idx);
 
-	IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
-	txq->tx_tail = tx_id;
+	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
+	txq->tx_tail = desc_idx;
 
-	return nb_tx;
+	return idx;
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 84351011f1..1da1278452 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -403,6 +403,112 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+
+#define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
+#define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_CMD_SHIFT	(4)
+#define IAVF_TXD_DATA_QW1_CMD_MASK	(0x3FFUL << IAVF_TXD_DATA_QW1_CMD_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_SHIFT	(16)
+#define IAVF_TXD_DATA_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_MACLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_MACLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_IPLEN_MASK	\
+	(0x7FUL << IAVF_TXD_DATA_QW1_OFFSET_IPLEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT	\
+	(IAVF_TXD_DATA_QW1_OFFSET_SHIFT + IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_OFFSET_L4LEN_MASK	\
+	(0xFUL << IAVF_TXD_DATA_QW1_OFFSET_L4LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_MACLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_IPLEN_MASK	\
+	(0x7FUL << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_L4LEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define IAVF_TXD_DATA_QW1_FCLEN_MASK	\
+	(0xFUL << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT	(34)
+#define IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK	\
+	(0x3FFFULL << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT)
+
+#define IAVF_TXD_DATA_QW1_L2TAG1_SHIFT		(48)
+#define IAVF_TXD_DATA_QW1_L2TAG1_MASK		\
+	(0xFFFFULL << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT	(11)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_MASK	\
+	(0x7UL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT	(14)
+#define IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_MASK	\
+	(0xFUL << IAVF_TXD_CTX_QW1_IPSEC_PARAMS_ICVLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT		(30)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_MASK		\
+	(0x3FFFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_SHIFT	(30)
+#define IAVF_TXD_CTX_QW1_TSYNC_PARAMS_TLEN_MASK		\
+	(0x3FUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_TLEN_SHIFT)
+
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT		(50)
+#define IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_MASK		\
+	(0x3FFFUL << IAVF_TXD_CTX_QW1_SEG_PARAMS_MSS_SHIFT)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT		(0)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_external_ip_type {
+	IAVF_TX_CTX_DESC_EIPT_NONE,
+	IAVF_TX_CTX_DESC_EIPT_IPV6,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_NO_CHECKSUM_OFFLOAD,
+	IAVF_TX_CTX_DESC_EIPT_IPV4_CHECKSUM_OFFLOAD
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT	(2)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_MASK		(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_SHIFT	(9)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNT_MASK		(0x3UL)
+
+enum iavf_tx_ctx_desc_tunnel_l4_tunnel_type {
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_NO_UDP_GRE,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_UDP,
+	IAVF_TX_CTX_DESC_L4_TUN_TYP_GRE
+};
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT	(11)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_MASK	(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_SHIFT	(12)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4TUNLEN_MASK	(0x7FUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_SHIFT	(19)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_DECTTL_MASK		(0xFUL)
+
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_SHIFT	(23)
+#define IAVF_TXD_CTX_QW0_TUN_PARAMS_L4T_CS_MASK		(0x1UL)
+
+#define IAVF_TXD_CTX_QW0_L2TAG2_PARAM			(32)
+#define IAVF_TXD_CTX_QW0_L2TAG2_MASK			(0xFFFFUL)
+
+
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK	(0xFFFFF)
+
+/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
+#define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
+
+
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M	(0x3FF) /* 10-bits */
 
@@ -553,9 +659,10 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	const volatile struct iavf_tx_desc *tx_desc = desc;
 	enum iavf_tx_desc_dtype_value type;
 
-	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
-		tx_desc->cmd_type_offset_bsz &
-		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+
+	type = (enum iavf_tx_desc_dtype_value)
+		rte_le_to_cpu_64(tx_desc->cmd_type_offset_bsz &
+			rte_cpu_to_le_64(IAVF_TXD_DATA_QW1_DTYPE_MASK));
 	switch (type) {
 	case IAVF_TX_DESC_DTYPE_DATA:
 		name = "Tx_data_desc";
@@ -569,8 +676,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	}
 
 	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
-	       txq->queue_id, name, tx_id, tx_desc->buffer_addr,
-	       tx_desc->cmd_type_offset_bsz);
+		txq->queue_id, name, tx_id, tx_desc->buffer_addr,
+		tx_desc->cmd_type_offset_bsz);
 }
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index d4f4d705b7..1bac59bf0e 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -363,10 +363,12 @@ static inline void
 flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
 		     const uint32_t *type_table)
 {
-	const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M,
-						 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+	const __m128i ptype_mask =
+			_mm_set_epi16(IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+				IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+				IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+				IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
+
 	__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
 	__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
 	__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v13 3/7] net/iavf: add support for asynchronous virt channel messages
  2021-10-28 16:04 ` [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec " Radu Nicolau
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 1/7] common/iavf: " Radu Nicolau
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 2/7] net/iavf: rework Tx path Radu Nicolau
@ 2021-10-28 16:04   ` Radu Nicolau
  2021-10-29 20:33     ` Ferruh Yigit
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
                     ` (4 subsequent siblings)
  7 siblings, 1 reply; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 16:04 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for asynchronous virtual channel messages, specifically for
inline IPsec messages.
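
The mechanism in one place, as a hedged standalone sketch (names are
illustrative, not the driver's): a synchronous command expects a single
adminq reply, while an async inline-IPsec command is acknowledged
immediately and answered later, so the pending counter is primed with two
and the waiter is only released once it reaches zero.

#include <stdint.h>

static uint32_t pend_cmd_replies;

static void
arm_cmd(int async)
{
	__atomic_store_n(&pend_cmd_replies, async ? 2 : 1, __ATOMIC_RELAXED);
}

/* called for each reply whose opcode matches the pending command;
 * returns 1 once every expected reply has been seen */
static int
cmd_reply_received(void)
{
	return __atomic_sub_fetch(&pend_cmd_replies, 1, __ATOMIC_RELAXED) == 0;
}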

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h       |  25 +++++-
 drivers/net/iavf/iavf_vchnl.c | 140 +++++++++++++++++++++-------------
 2 files changed, 110 insertions(+), 55 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 12f541f539..8bd2b830ee 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -193,6 +193,7 @@ struct iavf_info {
 	uint64_t supported_rxdid;
 	uint8_t *proto_xtr; /* proto xtr type for all queues */
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+	uint32_t pend_cmd_count;
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
@@ -339,15 +340,35 @@ _clear_cmd(struct iavf_info *vf)
 static inline int
 _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 {
-	int ret = rte_atomic32_cmpset((volatile uint32_t *)&vf->pend_cmd,
-		VIRTCHNL_OP_UNKNOWN, ops);
+	enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
+	int ret = __atomic_compare_exchange((volatile uint32_t *)&vf->pend_cmd,
+			&op_unk, &ops,
+			0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
 
 	if (!ret)
 		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
 
+	__atomic_store_n(&vf->pend_cmd_count, 1, __ATOMIC_RELAXED);
+
 	return !ret;
 }
 
+/* Check there is pending cmd in execution. If none, set new command. */
+static inline int
+_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
+{
+	enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
+	int ret = __atomic_compare_exchange((volatile uint32_t *)&vf->pend_cmd,
+			&op_unk, &ops,
+			0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+
+	if (!ret)
+		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
+
+	__atomic_store_n(&vf->pend_cmd_count, 2, __ATOMIC_RELAXED);
+
+	return !ret;
+}
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index bb65dbf04f..df15e589d4 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -24,8 +24,8 @@
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
-#define MAX_TRY_TIMES 200
-#define ASQ_DELAY_MS  10
+#define MAX_TRY_TIMES 2000
+#define ASQ_DELAY_MS  1
 
 static uint32_t
 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
@@ -143,7 +143,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 }
 
 static int
-iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
+iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args,
+	int async)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
@@ -155,8 +156,14 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	if (vf->vf_reset)
 		return -EIO;
 
-	if (_atomic_set_cmd(vf, args->ops))
-		return -1;
+
+	if (async) {
+		if (_atomic_set_async_response_cmd(vf, args->ops))
+			return -1;
+	} else {
+		if (_atomic_set_cmd(vf, args->ops))
+			return -1;
+	}
 
 	ret = iavf_aq_send_msg_to_pf(hw, args->ops, IAVF_SUCCESS,
 				    args->in_args, args->in_args_size, NULL);
@@ -252,9 +259,11 @@ static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			uint16_t msglen)
 {
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 	struct virtchnl_pf_event *pf_msg =
 			(struct virtchnl_pf_event *)msg;
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Error event");
@@ -330,18 +339,42 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
 		case iavf_aqc_opc_send_msg_to_vf:
 			if (msg_opc == VIRTCHNL_OP_EVENT) {
 				iavf_handle_pf_event_msg(dev, info.msg_buf,
-							info.msg_len);
+						info.msg_len);
 			} else {
+				/* check for inline IPsec events */
+				struct inline_ipsec_msg *imsg =
+					(struct inline_ipsec_msg *)info.msg_buf;
+				struct rte_eth_event_ipsec_desc desc;
+				if (msg_opc ==
+					VIRTCHNL_OP_INLINE_IPSEC_CRYPTO &&
+					imsg->ipsec_opcode ==
+						INLINE_IPSEC_OP_EVENT) {
+					struct virtchnl_ipsec_event *ev =
+							imsg->ipsec_data.event;
+					desc.subtype =
+						RTE_ETH_EVENT_IPSEC_UNKNOWN;
+					desc.metadata = ev->ipsec_event_data;
+					rte_eth_dev_callback_process(dev,
+							RTE_ETH_EVENT_IPSEC,
+							&desc);
+					return;
+				}
+
 				/* read message and it's expected one */
-				if (msg_opc == vf->pend_cmd)
-					_notify_cmd(vf, msg_ret);
-				else
-					PMD_DRV_LOG(ERR, "command mismatch,"
-						    "expect %u, get %u",
-						    vf->pend_cmd, msg_opc);
+				if (msg_opc == vf->pend_cmd) {
+					uint32_t cmd_count =
+					__atomic_sub_fetch(&vf->pend_cmd_count,
+							1, __ATOMIC_RELAXED);
+					if (cmd_count == 0)
+						_notify_cmd(vf, msg_ret);
+				} else {
+					PMD_DRV_LOG(ERR,
+					"command mismatch, expect %u, get %u",
+						vf->pend_cmd, msg_opc);
+				}
 				PMD_DRV_LOG(DEBUG,
-					    "adminq response is received,"
-					    " opcode = %d", msg_opc);
+				"adminq response is received, opcode = %d",
+						msg_opc);
 			}
 			break;
 		default:
@@ -365,7 +398,7 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_ENABLE_VLAN_STRIPPING");
@@ -386,7 +419,7 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " OP_DISABLE_VLAN_STRIPPING");
@@ -415,7 +448,7 @@ iavf_check_api_version(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
 		return err;
@@ -468,12 +501,13 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
-		VIRTCHNL_VF_OFFLOAD_QOS;
+		VIRTCHNL_VF_OFFLOAD_QOS |
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -518,7 +552,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
@@ -562,7 +596,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_strip);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" :
@@ -602,7 +636,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable)
 	args.in_args_size = sizeof(vlan_insert);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" :
@@ -645,7 +679,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(vlan_filter);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN_V2" :  "OP_DEL_VLAN_V2");
@@ -666,7 +700,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	ret = iavf_execute_vf_cmd(adapter, &args);
+	ret = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (ret) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
@@ -697,7 +731,7 @@ iavf_enable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES");
@@ -725,7 +759,7 @@ iavf_disable_queues(struct iavf_adapter *adapter)
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES");
@@ -758,7 +792,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = sizeof(queue_select);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES");
@@ -800,7 +834,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_ENABLE_QUEUES_V2");
@@ -844,7 +878,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_DISABLE_QUEUES_V2");
@@ -890,7 +924,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of %s",
 			    on ? "OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2");
@@ -922,7 +956,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_LUT");
@@ -954,7 +988,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_CONFIG_RSS_KEY");
@@ -1046,7 +1080,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
@@ -1087,7 +1121,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP");
 
@@ -1128,7 +1162,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
 	args.in_args_size = len;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
 
@@ -1188,7 +1222,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 		args.in_args_size = len;
 		args.out_buffer = vf->aq_resp;
 		args.out_size = IAVF_AQ_BUF_SZ;
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		if (err)
 			PMD_DRV_LOG(ERR, "fail to execute command %s",
 				    add ? "OP_ADD_ETHER_ADDRESS" :
@@ -1215,7 +1249,7 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
 		*pstats = NULL;
@@ -1250,7 +1284,7 @@ iavf_config_promisc(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1290,7 +1324,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr,
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_ETH_ADDR" :  "OP_DEL_ETH_ADDR");
@@ -1317,7 +1351,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 	args.in_args_size = sizeof(cmd_buffer);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
 			    add ? "OP_ADD_VLAN" :  "OP_DEL_VLAN");
@@ -1344,7 +1378,7 @@ iavf_fdir_add(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
 		return err;
@@ -1404,7 +1438,7 @@ iavf_fdir_del(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
 		return err;
@@ -1451,7 +1485,7 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
 		return err;
@@ -1492,7 +1526,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of %s",
@@ -1515,7 +1549,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_GET_RSS_HENA_CAPS");
@@ -1541,7 +1575,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR,
 			    "Failed to execute command of OP_SET_RSS_HENA");
@@ -1562,7 +1596,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter)
 	args.in_args_size = 0;
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR,
@@ -1595,7 +1629,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err)
 		PMD_DRV_LOG(ERR, "Failed to execute command of"
 			    " VIRTCHNL_OP_CONFIG_TC_MAP");
@@ -1640,7 +1674,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 		i * sizeof(struct virtchnl_ether_addr);
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 
 	if (err) {
 		PMD_DRV_LOG(ERR, "fail to execute command %s",
@@ -1686,11 +1720,11 @@ iavf_request_queues(struct rte_eth_dev *dev, uint16_t num)
 		 * before iavf_read_msg_from_pf.
 		 */
 		rte_intr_disable(pci_dev->intr_handle);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_intr_enable(pci_dev->intr_handle);
 	} else {
 		rte_eal_alarm_cancel(iavf_dev_alarm_handler, dev);
-		err = iavf_execute_vf_cmd(adapter, &args);
+		err = iavf_execute_vf_cmd(adapter, &args, 0);
 		rte_eal_alarm_set(IAVF_ALARM_INTERVAL,
 				  iavf_dev_alarm_handler, dev);
 	}
@@ -1729,7 +1763,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 	args.out_buffer = vf->aq_resp;
 	args.out_size = IAVF_AQ_BUF_SZ;
 
-	err = iavf_execute_vf_cmd(adapter, &args);
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
 	if (err) {
 		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION");
 		return err;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v13 4/7] net/iavf: add iAVF IPsec inline crypto support
  2021-10-28 16:04 ` [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (2 preceding siblings ...)
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
@ 2021-10-28 16:04   ` Radu Nicolau
  2021-10-29 17:33     ` Ferruh Yigit
  2021-10-30 20:41     ` David Marchand
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
                     ` (3 subsequent siblings)
  7 siblings, 2 replies; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 16:04 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Ray Kinsella
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add support for inline crypto for IPsec, for ESP transport and
tunnel over IPv4 and IPv6, as well as supporting the offload of
ESP over UDP, and in conjunction with TSO for UDP and TCP flows.
Implement support for rte_security packet metadata.

Add definitions for the IPsec descriptors, and extend the data and
context descriptors to support the offload.

Add support to the virtual channel mailbox for IPsec Crypto request
operations. IPsec Crypto requests receive an initial acknowledgment
from the physical function driver confirming receipt of the request,
followed by an asynchronous response indicating success or failure of
the request, including any response data.

Add enhanced descriptor debugging

Refactor the scalar Tx burst function to support integration of the offload.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h                       |   10 +
 drivers/net/iavf/iavf_ethdev.c                |   41 +-
 drivers/net/iavf/iavf_generic_flow.c          |   15 +
 drivers/net/iavf/iavf_generic_flow.h          |    2 +
 drivers/net/iavf/iavf_ipsec_crypto.c          | 1894 +++++++++++++++++
 drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
 .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
 drivers/net/iavf/iavf_rxtx.c                  |  202 +-
 drivers/net/iavf/iavf_rxtx.h                  |  107 +-
 drivers/net/iavf/iavf_vchnl.c                 |   29 +
 drivers/net/iavf/meson.build                  |    3 +-
 drivers/net/iavf/rte_pmd_iavf.h               |    1 +
 drivers/net/iavf/version.map                  |    3 +
 13 files changed, 2823 insertions(+), 27 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
 create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
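
[Illustrative note, not part of the patch]
A minimal sketch of how an application is expected to request this offload
through the generic rte_security API; the port id, key material and session
mempools below are placeholder names, and error handling is omitted:

	#include <rte_ethdev.h>
	#include <rte_cryptodev.h>
	#include <rte_security.h>

	static struct rte_security_session *
	create_inline_esp_session(uint16_t port_id, uint8_t *key,
			struct rte_mempool *sess_mp, struct rte_mempool *priv_mp)
	{
		/* device level security context exported by the PMD */
		struct rte_security_ctx *sctx = rte_eth_dev_get_sec_ctx(port_id);

		/* AES-GCM AEAD transform for the SA (sizes are examples) */
		struct rte_crypto_sym_xform aead = {
			.type = RTE_CRYPTO_SYM_XFORM_AEAD,
			.aead = {
				.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.key = { .data = key, .length = 16 },
				.iv = { .offset = 0, .length = 12 },
				.digest_length = 16,
			},
		};

		/* inline crypto IPsec session: ESP, tunnel mode, egress */
		struct rte_security_session_conf conf = {
			.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			.ipsec = {
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
				.spi = 0x100,
				/* tunnel endpoint addresses omitted for brevity */
				.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
			},
			.crypto_xform = &aead,
		};

		return rte_security_session_create(sctx, &conf, sess_mp, priv_mp);
	}

For ingress SAs, a flow rule with an RTE_FLOW_ACTION_TYPE_SECURITY action
pointing at the session then binds matching ESP traffic to the SA, as
handled by the flow engine changes below.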

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 8bd2b830ee..bac72590bc 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -221,6 +221,7 @@ struct iavf_info {
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
 	struct iavf_parser_list dist_parser_list;
+	struct iavf_parser_list ipsec_crypto_parser_list;
 
 	struct iavf_fdir_info fdir; /* flow director info */
 	/* indicate large VF support enabled or not */
@@ -245,6 +246,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_IPV6_FLOW,
 	IAVF_PROTO_XTR_TCP,
 	IAVF_PROTO_XTR_IP_OFFSET,
+	IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
 	IAVF_PROTO_XTR_MAX,
 };
 
@@ -256,11 +258,14 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 };
 
+struct iavf_security_ctx;
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
 	struct rte_eth_dev_data *dev_data;
 	struct iavf_info vf;
+	struct iavf_security_ctx *security_ctx;
 
 	bool rx_bulk_alloc_allowed;
 	/* For vector PMD */
@@ -279,6 +284,8 @@ struct iavf_adapter {
 	(&((struct iavf_adapter *)adapter)->vf)
 #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct iavf_adapter *)adapter)->hw)
+#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
+	(((struct iavf_adapter *)adapter)->security_ctx)
 
 /* IAVF_VSI_TO */
 #define IAVF_VSI_TO_HW(vsi) \
@@ -426,5 +433,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
 			uint16_t size);
 void iavf_tm_conf_init(struct rte_eth_dev *dev);
 void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
+int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index f892306f18..dba505494f 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -30,6 +30,7 @@
 #include "iavf_rxtx.h"
 #include "iavf_generic_flow.h"
 #include "rte_pmd_iavf.h"
+#include "iavf_ipsec_crypto.h"
 
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
@@ -71,6 +72,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
 	[IAVF_PROTO_XTR_IP_OFFSET] = {
 		.param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
 		.ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
+	[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
+		.param = {
+		.name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
+		.ol_flag =
+			&rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
 };
 
 static int iavf_dev_configure(struct rte_eth_dev *dev);
@@ -922,6 +928,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 	iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
 				  false);
 
+	/* free the iAVF security device context and all related resources */
+	iavf_security_ctx_destroy(adapter);
+
 	adapter->stopped = 1;
 	dev->data->dev_started = 0;
 
@@ -931,7 +940,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
 static int
 iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = &adapter->vf;
 
 	dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
 	dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
@@ -973,6 +984,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
+		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
+	}
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
@@ -1718,6 +1734,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 		{ "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
 		{ "tcp",       IAVF_PROTO_XTR_TCP       },
 		{ "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
+		{ "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
 	};
 	uint32_t i;
 
@@ -1726,8 +1743,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
 			return xtr_type_map[i].type;
 	}
 
-	PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
-		    "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
+	PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
+			"vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
 
 	return -1;
 }
@@ -2375,6 +2392,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 		goto flow_init_err;
 	}
 
+	/** Check if the IPsec Crypto offload is supported and create
+	 *  security_ctx if it is.
+	 */
+	if (iavf_ipsec_crypto_supported(adapter)) {
+		/* Initialize security_ctx only for the primary process */
+		ret = iavf_security_ctx_create(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance");
+			return ret;
+		}
+
+		ret = iavf_security_init(adapter);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "failed to initialize ipsec crypto resources");
+			return ret;
+		}
+	}
+
 	iavf_default_rss_disable(adapter);
 
 	return 0;
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index 364904fa02..2befa125ac 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1766,6 +1766,7 @@ iavf_flow_init(struct iavf_adapter *ad)
 	TAILQ_INIT(&vf->flow_list);
 	TAILQ_INIT(&vf->rss_parser_list);
 	TAILQ_INIT(&vf->dist_parser_list);
+	TAILQ_INIT(&vf->ipsec_crypto_parser_list);
 	rte_spinlock_init(&vf->flow_ops_lock);
 
 	RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
@@ -1840,6 +1841,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
+		list = &vf->ipsec_crypto_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
@@ -2149,6 +2153,13 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 
 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
 				    actions, error);
+	if (*engine)
+		return 0;
+
+	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
+			pattern, actions, error);
+	if (*engine)
+		return 0;
 
 	if (!*engine) {
 		rte_flow_error_set(error, EINVAL,
@@ -2195,6 +2206,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	/* Special case for inline crypto egress flows */
+	if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY)
+		goto free_flow;
+
 	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
 			&engine, iavf_parse_engine_create, error);
 	if (ret < 0) {
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index f2b54e1944..3681a96b31 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -464,6 +464,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 /* engine types. */
 enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_NONE = 0,
+	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
 	IAVF_FLOW_ENGINE_MAX,
@@ -477,6 +478,7 @@ enum iavf_flow_engine_type {
  */
 enum iavf_flow_classification_stage {
 	IAVF_FLOW_STAGE_NONE = 0,
+	IAVF_FLOW_STAGE_IPSEC_CRYPTO,
 	IAVF_FLOW_STAGE_RSS,
 	IAVF_FLOW_STAGE_DISTRIBUTOR,
 	IAVF_FLOW_STAGE_MAX,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
new file mode 100644
index 0000000000..eabc4be6bc
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -0,0 +1,1894 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+#include <rte_security_driver.h>
+#include <rte_security.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_log.h"
+#include "iavf_generic_flow.h"
+
+#include "iavf_ipsec_crypto.h"
+#include "iavf_ipsec_crypto_capabilities.h"
+
+/**
+ * iAVF IPsec Crypto Security Context
+ */
+struct iavf_security_ctx {
+	struct iavf_adapter *adapter;
+	int pkt_md_offset;
+	struct rte_cryptodev_capabilities *crypto_capabilities;
+};
+
+/**
+ * iAVF IPsec Crypto Security Session Parameters
+ */
+struct iavf_security_session {
+	struct iavf_adapter *adapter;
+
+	enum rte_security_ipsec_sa_mode mode;
+	enum rte_security_ipsec_tunnel_type type;
+	enum rte_security_ipsec_sa_direction direction;
+
+	struct {
+		uint32_t spi; /* Security Parameter Index */
+		uint32_t hw_idx; /* SA Index in hardware table */
+	} sa;
+
+	struct {
+		uint8_t enabled :1;
+		union {
+			uint64_t value;
+			struct {
+				uint32_t hi;
+				uint32_t low;
+			};
+		};
+	} esn;
+
+	struct {
+		uint8_t enabled :1;
+	} udp_encap;
+
+	size_t iv_sz;
+	size_t icv_sz;
+	size_t block_sz;
+
+	struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
+};
+/**
+ *  IV Length field in IPsec Tx Desc uses the following encoding:
+ *
+ *  0B - 0
+ *  4B - 1
+ *  8B - 2
+ *  16B - 3
+ *
+ * but we also need the IV length for TSO to correctly calculate the total
+ * header length, so it is also placed in the upper 6 bits here for easier
+ * retrieval.
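+ *
+ * Illustrative encodings (not part of the original patch):
+ *   iv_sz = 4  -> (4 << 2)  | 1 = 0x11
+ *   iv_sz = 8  -> (8 << 2)  | 2 = 0x22
+ *   iv_sz = 16 -> (16 << 2) | 3 = 0x43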
+ */
+static inline uint8_t
+calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
+{
+	uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
+
+	switch (iv_sz) {
+	case 4:
+		iv_length = IAVF_IPSEC_IV_LEN_DW;
+		break;
+	case 8:
+		iv_length = IAVF_IPSEC_IV_LEN_DDW;
+		break;
+	case 16:
+		iv_length = IAVF_IPSEC_IV_LEN_QDW;
+		break;
+	}
+
+	return (iv_sz << 2) | iv_length;
+}
+
+static unsigned int
+iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
+{
+	return sizeof(struct iavf_security_session);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_capability(struct iavf_security_ctx *iavf_sctx,
+	uint32_t algo, uint32_t type)
+{
+	const struct rte_cryptodev_capabilities *capability;
+	int i = 0;
+
+	capability = &iavf_sctx->crypto_capabilities[i];
+
+	while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+			capability->sym.xform_type == type &&
+			capability->sym.cipher.algo == algo)
+			return &capability->sym;
+		/** try next capability */
+		capability = &iavf_sctx->crypto_capabilities[++i];
+	}
+
+	return NULL;
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_auth_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
+}
+
+static const struct rte_cryptodev_symmetric_capability *
+get_aead_capability(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
+}
+
+static uint16_t
+get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_cipher_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_aead_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->cipher.block_size;
+}
+
+static uint16_t
+get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
+	enum rte_crypto_auth_algorithm algo)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, algo);
+	if (capability == NULL)
+		return 0;
+
+	return capability->auth.block_size;
+}
+
+static uint8_t
+calc_context_desc_cipherblock_sz(size_t len)
+{
+	switch (len) {
+	case 8:
+		return 0x2;
+	case 16:
+		return 0x3;
+	default:
+		return 0x0;
+	}
+}
+
+static int
+valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
+{
+	if (len < min || len > max)
+		return false;
+
+	if (increment == 0)
+		return true;
+
+	if ((len - min) % increment)
+		return false;
+
+	/* make sure it fits in the key array */
+	if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
+		return false;
+
+	return true;
+}
+
+static int
+valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_auth_xform *auth)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_auth_capability(iavf_sctx, auth->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(auth->key.length,
+		capability->auth.key_size.min,
+		capability->auth.key_size.max,
+		capability->auth.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_cipher_xform *cipher)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_cipher_capability(iavf_sctx, cipher->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(cipher->key.length,
+		capability->cipher.key_size.min,
+		capability->cipher.key_size.max,
+		capability->cipher.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
+	struct rte_crypto_aead_xform *aead)
+{
+	const struct rte_cryptodev_symmetric_capability *capability;
+
+	capability = get_aead_capability(iavf_sctx, aead->algo);
+	if (capability == NULL)
+		return false;
+
+	/* verify key size */
+	if (!valid_length(aead->key.length,
+		capability->aead.key_size.min,
+		capability->aead.key_size.max,
+		capability->aead.key_size.increment))
+		return false;
+
+	return true;
+}
+
+static int
+iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
+	struct rte_security_session_conf *conf)
+{
+	/** validate security action/protocol selection */
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+		conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
+		PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate IPsec protocol selection */
+	if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
+		return -EINVAL;
+	}
+
+	/** validate selected options */
+	if (conf->ipsec.options.copy_dscp ||
+		conf->ipsec.options.copy_flabel ||
+		conf->ipsec.options.copy_df ||
+		conf->ipsec.options.dec_ttl ||
+		conf->ipsec.options.ecn ||
+		conf->ipsec.options.stats) {
+		PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+		return -EINVAL;
+	}
+
+	/**
+	 * Validate crypto xforms parameters.
+	 *
+	 * AEAD transforms can be used for either inbound/outbound IPsec SAs,
+	 * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
+	 * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
+	 */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_auth_xform(iavf_sctx,
+				&conf->crypto_xform->next->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	} else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
+		conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		conf->crypto_xform->next &&
+		conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+
+		if (!valid_cipher_xform(iavf_sctx,
+				&conf->crypto_xform->next->cipher)) {
+			PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void
+sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_aead_xform *aead, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AEAD;
+
+	switch (aead->algo) {
+	case RTE_CRYPTO_AEAD_AES_CCM:
+		cfg->algo_type = VIRTCHNL_AES_CCM; break;
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		cfg->algo_type = VIRTCHNL_AES_GCM; break;
+	case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+		cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
+		break;
+	}
+
+	cfg->key_len = aead->key.length;
+	cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt len */
+	cfg->digest_len = aead->digest_length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, aead->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_cipher_xform *cipher, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_CIPHER;
+
+	switch (cipher->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		cfg->algo_type = VIRTCHNL_AES_CBC; break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		cfg->algo_type = VIRTCHNL_3DES_CBC; break;
+	case RTE_CRYPTO_CIPHER_NULL:
+		cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		cfg->algo_type = VIRTCHNL_AES_CTR;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid cipher parameters");
+		break;
+	}
+
+	cfg->key_len = cipher->key.length;
+	cfg->iv_len = cipher->iv.length;
+	cfg->salt = salt;
+
+	memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
+}
+
+static void
+sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
+	struct rte_crypto_auth_xform *auth, uint32_t salt)
+{
+	cfg->crypto_type = VIRTCHNL_AUTH;
+
+	switch (auth->algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+		cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		cfg->algo_type = VIRTCHNL_AES_CMAC; break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		cfg->algo_type = VIRTCHNL_AES_GMAC;
+		cfg->salt = salt;
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid auth parameters");
+		break;
+	}
+
+	cfg->key_len = auth->key.length;
+	/* special case for RTE_CRYPTO_AUTH_AES_GMAC */
+	if (auth->algo == RTE_CRYPTO_AUTH_AES_GMAC)
+		cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt */
+	else
+		cfg->iv_len = auth->iv.length;
+	cfg->digest_len = auth->digest_length;
+
+	memcpy(cfg->key_data, auth->key.data, cfg->key_len);
+}
+
+/**
+ * Send SA add virtual channel request to Inline IPsec driver.
+ *
+ * Inline IPsec driver expects SPI and destination IP address to be in host
+ * order, but DPDK APIs are network order, therefore we need to do a htonl
+ * conversion of these parameters.
+ */
+static uint32_t
+iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
+	struct rte_security_session_conf *conf)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	struct virtchnl_ipsec_sa_cfg *sa_cfg;
+	size_t request_len, response_len;
+
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg);
+
+	request = rte_malloc("iavf-sad-add-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_cfg_resp);
+	response = rte_malloc("iavf-sad-add-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set SA configuration params */
+	sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
+
+	sa_cfg->spi = conf->ipsec.spi;
+	sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
+	sa_cfg->virtchnl_direction =
+		conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+			VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
+
+	if (conf->ipsec.options.esn) {
+		sa_cfg->esn_enabled = 1;
+		sa_cfg->esn_hi = conf->ipsec.esn.hi;
+		sa_cfg->esn_low = conf->ipsec.esn.low;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sa_cfg->udp_encap_enabled = 1;
+
+	/* Set outer IP params */
+	if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
+
+		*((uint32_t *)sa_cfg->dst_addr)	=
+			htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
+	} else {
+		uint32_t *v6_dst_addr =
+			(uint32_t *)conf->ipsec.tunnel.ipv6.dst_addr.s6_addr;
+
+		sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
+
+		((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
+		((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
+		((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
+		((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
+	}
+
+	/* set crypto params */
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->aead, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->cipher, conf->ipsec.salt);
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->auth, conf->ipsec.salt);
+
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
+			&conf->crypto_xform->auth, conf->ipsec.salt);
+		if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
+			sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
+			&conf->crypto_xform->next->cipher, conf->ipsec.salt);
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sa_cfg_resp->sa_handle;
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static void
+set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
+	struct iavf_security_session *sess)
+{
+	template->sa_idx = sess->sa.hw_idx;
+
+	template->ol_flags = 0;
+
+	if (sess->udp_encap.enabled)
+		template->ol_flags |= IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
+
+	if (sess->esn.enabled)
+		template->ol_flags |= IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
+
+	template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
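+	/*
+	 * Illustrative encoding (not part of the original patch): a 16B
+	 * cipher block with a 16B ICV encodes below as
+	 * 0x3 | ((16 >> 2) << 3) = 0x23.
+	 */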
+	template->ctx_desc_ipsec_params =
+			calc_context_desc_cipherblock_sz(sess->block_sz) |
+			((uint8_t)(sess->icv_sz >> 2) << 3);
+}
+
+static void
+set_session_parameter(struct iavf_security_ctx *iavf_sctx,
+	struct iavf_security_session *sess,
+	struct rte_security_session_conf *conf, uint32_t sa_idx)
+{
+	sess->adapter = iavf_sctx->adapter;
+
+	sess->mode = conf->ipsec.mode;
+	sess->direction = conf->ipsec.direction;
+
+	if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		sess->type = conf->ipsec.tunnel.type;
+
+	sess->sa.spi = conf->ipsec.spi;
+	sess->sa.hw_idx = sa_idx;
+
+	if (conf->ipsec.options.esn) {
+		sess->esn.enabled = 1;
+		sess->esn.value = conf->ipsec.esn.value;
+	}
+
+	if (conf->ipsec.options.udp_encap)
+		sess->udp_encap.enabled = 1;
+
+	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		sess->block_sz = get_aead_blocksize(iavf_sctx,
+			conf->crypto_xform->aead.algo);
+		sess->iv_sz = sizeof(uint64_t); /* iv.length includes salt */
+		sess->icv_sz = conf->crypto_xform->aead.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		sess->block_sz = get_cipher_blocksize(iavf_sctx,
+			conf->crypto_xform->cipher.algo);
+		sess->iv_sz = conf->crypto_xform->cipher.iv.length;
+		sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
+	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			sess->block_sz = get_auth_blocksize(iavf_sctx,
+				RTE_CRYPTO_AUTH_AES_GMAC);
+			sess->iv_sz = conf->crypto_xform->auth.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		} else {
+			sess->block_sz = get_cipher_blocksize(iavf_sctx,
+				conf->crypto_xform->next->cipher.algo);
+			sess->iv_sz =
+				conf->crypto_xform->next->cipher.iv.length;
+			sess->icv_sz = conf->crypto_xform->auth.digest_length;
+		}
+	}
+
+	set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
+}
+
+/**
+ * Create IPsec Security Association for inline IPsec Crypto offload.
+ *
+ * 1. validate session configuration parameters
+ * 2. allocate session memory from mempool
+ * 3. add SA to hardware database
+ * 4. set session parameters
+ * 5. create packet metadata template for datapath
+ */
+static int
+iavf_ipsec_crypto_session_create(void *device,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *session,
+				 struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_session = NULL;
+	int sa_idx;
+	int ret = 0;
+
+	/* validate that all SA parameters are valid for device */
+	ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
+	if (ret)
+		return ret;
+
+	/* allocate session context */
+	if (rte_mempool_get(mempool, (void **)&iavf_session)) {
+		PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
+		return -ENOMEM;
+	}
+
+	/* add SA to hardware database */
+	sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
+	if (sa_idx < 0) {
+		PMD_DRV_LOG(ERR,
+			"Failed to add SA (spi: %d, mode: %s, direction: %s)",
+			conf->ipsec.spi,
+			conf->ipsec.mode ==
+				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
+				"transport" : "tunnel",
+			conf->ipsec.direction ==
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
+				"inbound" : "outbound");
+
+		rte_mempool_put(mempool, iavf_session);
+		return -EFAULT;
+	}
+
+	/* save data plane required session parameters */
+	set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
+
+	/* save to security session private data */
+	set_sec_session_private_data(session, iavf_session);
+
+	return 0;
+}
+
+/**
+ * Check if valid ipsec crypto action.
+ * SPI must be non-zero and SPI in session must match SPI value
+ * passed into function.
+ *
+ * returns: 0 if invalid session or SPI value equals zero
+ * returns: 1 if valid
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_session *sess = session->sess_private_data;
+
+	/* verify we have a valid session and that it belong to this adapter */
+	if (unlikely(sess == NULL || sess->adapter != adapter))
+		return false;
+
+	/* SPI value must be non-zero */
+	if (spi == 0)
+		return false;
+	/* Session SPI must match flow SPI */
+	else if (sess->sa.spi == spi) {
+		return true;
+		/**
+		 * TODO: We should add a way of tracking valid hw SA indices to
+		 * make validation less brittle
+		 */
+	}
+
+	return false;
+}
+
+/**
+ * Send virtual channel security policy add request to IES driver.
+ *
+ * IES driver expects SPI and destination IP address to be in host
+ * order, but DPDK APIs are network order, therefore we need to do a htonl
+ * conversion of these parameters.
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg);
+	request = rte_malloc("iavf-inbound-security-policy-add-request",
+				request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* ESP SPI */
+	request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
+
+	/* Destination IP  */
+	if (is_v4) {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
+		request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
+	} else {
+		request->ipsec_data.sp_cfg->table_id =
+				VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+		request->ipsec_data.sp_cfg->dip[0] =
+				htonl(((uint32_t *)v6_dst_addr)[0]);
+		request->ipsec_data.sp_cfg->dip[1] =
+				htonl(((uint32_t *)v6_dst_addr)[1]);
+		request->ipsec_data.sp_cfg->dip[2] =
+				htonl(((uint32_t *)v6_dst_addr)[2]);
+		request->ipsec_data.sp_cfg->dip[3] =
+				htonl(((uint32_t *)v6_dst_addr)[3]);
+	}
+
+	request->ipsec_data.sp_cfg->drop = drop;
+
+	/** Traffic Class/Congestion Domain currently not supported */
+	request->ipsec_data.sp_cfg->set_tc = 0;
+	request->ipsec_data.sp_cfg->cgd = 0;
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_cfg_resp);
+	response = rte_malloc("iavf-inbound-security-policy-add-response",
+				response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.sp_cfg_resp->rule_id;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_update);
+	request = rte_malloc("iavf-sa-update-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sa-update-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set request params */
+	request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
+	request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		rc = response->ipsec_data.ipsec_resp->resp;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_update(void *device,
+		struct rte_security_session *session,
+		struct rte_security_session_conf *conf)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int rc = 0;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belong to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* update esn hi 32-bits */
+	if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
+		/**
+		 * Update ESN in hardware for inbound SA. Store in
+		 * iavf_security_session for outbound SA for use
+		 * in *iavf_ipsec_crypto_pkt_metadata_set* function.
+		 */
+		if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+			rc = iavf_ipsec_crypto_sa_update_esn(adapter,
+					iavf_sess);
+		else
+			iavf_sess->esn.hi = conf->ipsec.esn.hi;
+	}
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
+		struct rte_security_session *session __rte_unused,
+		struct rte_security_stats *stats __rte_unused)
+{
+	return -EOPNOTSUPP;
+}
+
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sp_destroy);
+	request = rte_malloc("iavf-sp-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+	response = rte_malloc("iavf-sp-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* set security policy params */
+	request->ipsec_data.sp_destroy->table_id = is_v4 ?
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
+			VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
+	request->ipsec_data.sp_destroy->rule_id = flow_id;
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+	else
+		return response->ipsec_data.ipsec_status->status;
+
+update_cleanup:
+	rte_free(request);
+	rte_free(response);
+
+	return rc;
+}
+
+static uint32_t
+iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
+	struct iavf_security_session *sess)
+{
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+
+	int rc = 0;
+
+	request_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_sa_destroy);
+
+	request = rte_malloc("iavf-sa-del-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_resp);
+
+	response = rte_malloc("iavf-sa-del-response", response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/**
+	 * SA delete supports deletion of 1-8 specified SAs; if the flag
+	 * field is zero, all SAs associated with the VF will be deleted.
+	 */
+	if (sess) {
+		request->ipsec_data.sa_destroy->flag = 0x1;
+		request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
+	} else {
+		request->ipsec_data.sa_destroy->flag = 0x0;
+	}
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id)
+		rc = -EFAULT;
+
+	/**
+	 * Delete status will be the same bitmask as the sa_destroy request
+	 * flag if the delete was successful
+	 */
+	if (request->ipsec_data.sa_destroy->flag !=
+			response->ipsec_data.ipsec_status->status)
+		rc = -EFAULT;
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static int
+iavf_ipsec_crypto_session_destroy(void *device,
+		struct rte_security_session *session)
+{
+	struct iavf_adapter *adapter = NULL;
+	struct iavf_security_session *iavf_sess = NULL;
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	int ret;
+
+	adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	iavf_sess = (struct iavf_security_session *)session->sess_private_data;
+
+	/* verify we have a valid session and that it belong to this adapter */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
+	rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
+	return ret;
+}
+
+/**
+ * Get the ESP trailer from the packet and calculate the total ESP trailer
+ * length, which includes the padding, the ESP trailer footer and the ICV
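+ *
+ * For example (illustrative, not part of the original patch): with a 16B
+ * ICV and a pad length of 2, the total returned length is
+ * sizeof(struct rte_esp_tail) + 16 + 2 = 20 bytes.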
+ */
+static inline struct rte_esp_tail *
+iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t *esp_trailer_length)
+{
+	struct rte_esp_tail *esp_trailer;
+
+	uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
+	uint16_t offset = 0;
+
+	/**
+	 * The ICV will not be present in TSO packets as this is appended by
+	 * hardware during segment generation
+	 */
+	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
+		length -=  s->icv_sz;
+
+	*esp_trailer_length = length;
+
+	/**
+	 * Calculate offset in packet to ESP trailer header, this should be
+	 * total packet length less the size of the ESP trailer plus the ICV
+	 * length if it is present
+	 */
+	offset = rte_pktmbuf_pkt_len(m) - length;
+
+	if (m->nb_segs > 1) {
+		/* find segment which esp trailer is located */
+		while (m->data_len < offset) {
+			offset -= m->data_len;
+			m = m->next;
+		}
+	}
+
+	esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
+
+	*esp_trailer_length += esp_trailer->pad_len;
+
+	return esp_trailer;
+}
+
+static inline uint16_t
+iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
+	struct iavf_security_session *s, uint16_t esp_tlen)
+{
+	uint16_t ol2_len = m->l2_len;	/* MAC + VLAN */
+	uint16_t ol3_len = 0;		/* ipv4/6 + ext hdrs */
+	uint16_t ol4_len = 0;		/* UDP NATT */
+	uint16_t l3_len = 0;		/* IPv4/6 + ext hdrs */
+	uint16_t l4_len = 0;		/* TCP/UDP/STCP hdrs */
+	uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
+
+	if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ol3_len = m->outer_l3_len;
+		/**<
+		 * application provided l3len assumed to include length of
+		 * ipv4/6 hdr + ext hdrs
+		 */
+
+	if (s->udp_encap.enabled)
+		ol4_len = sizeof(struct rte_udp_hdr);
+
+	l3_len = m->l3_len;
+	l4_len = m->l4_len;
+
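+	/*
+	 * Illustrative (not part of the original patch): a 1514B transport
+	 * mode TCP packet with 14B MAC, 20B IPv4, 8B ESP hdr + 16B IV, 20B TCP
+	 * and an 18B ESP trailer yields 1514 - (14 + 24 + 20 + 20 + 18) = 1418B
+	 * of L4 payload.
+	 */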
+	return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
+			esp_hlen + l3_len + l4_len + esp_tlen);
+}
+
+static int
+iavf_ipsec_crypto_pkt_metadata_set(void *device,
+			 struct rte_security_session *session,
+			 struct rte_mbuf *m, void *params)
+{
+	struct rte_eth_dev *ethdev = device;
+	struct iavf_adapter *adapter =
+			IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct iavf_security_session *iavf_sess = session->sess_private_data;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
+	struct rte_esp_tail *esp_tail;
+	uint64_t *sqn = params;
+	uint16_t esp_trailer_length;
+
+	/* Check that the session is valid and associated with this device */
+	if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
+		return -EINVAL;
+
+	/* Get dynamic metadata location from mbuf */
+	md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
+		struct iavf_ipsec_crypto_pkt_metadata *);
+
+	/* Set immutable metadata values from session template */
+	memcpy(md, &iavf_sess->pkt_metadata_template,
+		sizeof(struct iavf_ipsec_crypto_pkt_metadata));
+
+	esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
+			&esp_trailer_length);
+
+	/* Set per packet mutable metadata values */
+	md->esp_trailer_len = esp_trailer_length;
+	md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
+				iavf_sess, esp_trailer_length);
+	md->next_proto = esp_tail->next_proto;
+
+	/* If Extended SN is in use, set the upper 32 bits in the metadata */
+	if (iavf_sess->esn.enabled && sqn != NULL)
+		md->esn = (uint32_t)(*sqn >> 32);
+
+	return 0;
+}
+
+static int
+iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
+		struct virtchnl_ipsec_cap *capability)
+{
+	/* Perform pf-vf comms */
+	struct inline_ipsec_msg *request = NULL, *response = NULL;
+	size_t request_len, response_len;
+	int rc;
+
+	request_len = sizeof(struct inline_ipsec_msg);
+
+	request = rte_malloc("iavf-device-capability-request", request_len, 0);
+	if (request == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	response_len = sizeof(struct inline_ipsec_msg) +
+			sizeof(struct virtchnl_ipsec_cap);
+	response = rte_malloc("iavf-device-capability-response",
+			response_len, 0);
+	if (response == NULL) {
+		rc = -ENOMEM;
+		goto update_cleanup;
+	}
+
+	/* set msg header params */
+	request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
+	request->req_id = (uint16_t)0xDEADBEEF;
+
+	/* send virtual channel request to add SA to hardware database */
+	rc = iavf_ipsec_crypto_request(adapter,
+			(uint8_t *)request, request_len,
+			(uint8_t *)response, response_len);
+	if (rc)
+		goto update_cleanup;
+
+	/* verify response id */
+	if (response->ipsec_opcode != request->ipsec_opcode ||
+		response->req_id != request->req_id) {
+		rc = -EFAULT;
+		goto update_cleanup;
+	}
+	memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
+
+update_cleanup:
+	rte_free(response);
+	rte_free(request);
+
+	return rc;
+}
+
+static const enum rte_crypto_auth_algorithm auth_maptbl[] = {
+	/* Hash Algorithm */
+	[VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
+	[VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	[VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
+	[VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
+	[VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	[VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
+	[VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
+	[VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
+	[VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
+	[VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
+	[VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
+	[VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+	[VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+	[VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+	[VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+};
+
+static void
+update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
+		struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
+
+	capability->auth.algo = auth_maptbl[acap->algo_type];
+	capability->auth.block_size = acap->block_size;
+
+	capability->auth.key_size.min = acap->min_key_size;
+	capability->auth.key_size.max = acap->max_key_size;
+	capability->auth.key_size.increment = acap->inc_key_size;
+
+	capability->auth.digest_size.min = acap->min_digest_size;
+	capability->auth.digest_size.max = acap->max_digest_size;
+	capability->auth.digest_size.increment = acap->inc_digest_size;
+}
+
+enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
+	/* Cipher Algorithm */
+	[VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
+	[VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
+	[VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
+	[VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
+};
+
+static void
+update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+
+	capability->cipher.algo = cipher_maptbl[acap->algo_type];
+
+	capability->cipher.block_size = acap->block_size;
+
+	capability->cipher.key_size.min = acap->min_key_size;
+	capability->cipher.key_size.max = acap->max_key_size;
+	capability->cipher.key_size.increment = acap->inc_key_size;
+
+	capability->cipher.iv_size.min = acap->min_iv_size;
+	capability->cipher.iv_size.max = acap->max_iv_size;
+	capability->cipher.iv_size.increment = acap->inc_iv_size;
+}
+
+enum rte_crypto_aead_algorithm aead_maptbl[] = {
+	/* AEAD Algorithm */
+	[VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
+	[VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
+	[VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+};
+
+static void
+update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
+	struct virtchnl_algo_cap *acap)
+{
+	struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
+
+	scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+
+	capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
+
+	capability->aead.algo = aead_maptbl[acap->algo_type];
+
+	capability->aead.block_size = acap->block_size;
+
+	capability->aead.key_size.min = acap->min_key_size;
+	capability->aead.key_size.max = acap->max_key_size;
+	capability->aead.key_size.increment = acap->inc_key_size;
+
+	capability->aead.aad_size.min = acap->min_aad_size;
+	capability->aead.aad_size.max = acap->max_aad_size;
+	capability->aead.aad_size.increment = acap->inc_aad_size;
+
+	capability->aead.iv_size.min = acap->min_iv_size;
+	capability->aead.iv_size.max = acap->max_iv_size;
+	capability->aead.iv_size.increment = acap->inc_iv_size;
+
+	capability->aead.digest_size.min = acap->min_digest_size;
+	capability->aead.digest_size.max = acap->max_digest_size;
+	capability->aead.digest_size.increment = acap->inc_digest_size;
+}
+
+/**
+ * Dynamically set crypto capabilities based on virtchannel IPsec
+ * capabilities structure.
+ */
+int
+iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
+{
+	struct rte_cryptodev_capabilities *capabilities;
+	int i, j, number_of_capabilities = 0, ci = 0;
+
+	/* Count the total number of crypto algorithms supported */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
+		number_of_capabilities += vch_cap->cap[i].algo_cap_num;
+
+	/**
+	 * Allocate cryptodev capabilities structure for
+	 * *number_of_capabilities* items plus one item to null terminate the
+	 * array
+	 */
+	capabilities = rte_zmalloc("crypto_cap",
+		sizeof(struct rte_cryptodev_capabilities) *
+		(number_of_capabilities + 1), 0);
+	if (capabilities == NULL)
+		return -ENOMEM;
+
+	capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
+
+	/**
+	 * Iterate over each virtchnl crypto capability by crypto type and
+	 * algorithm.
+	 */
+	for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
+		for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
+			switch (vch_cap->cap[i].crypto_type) {
+			case VIRTCHNL_AUTH:
+				update_auth_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_CIPHER:
+				update_cipher_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			case VIRTCHNL_AEAD:
+				update_aead_capabilities(&capabilities[ci],
+					&vch_cap->cap[i].algo_cap_list[j]);
+				break;
+			default:
+				capabilities[ci].op =
+						RTE_CRYPTO_OP_TYPE_UNDEFINED;
+				break;
+			}
+		}
+	}
+
+	iavf_sctx->crypto_capabilities = capabilities;
+	return 0;
+}
+
+/**
+ * Get security capabilities for device
+ */
+static const struct rte_security_capability *
+iavf_ipsec_crypto_capabilities_get(void *device)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	unsigned int i;
+
+	static struct rte_security_capability iavf_security_capabilities[] = {
+		{ /* IPsec Inline Crypto ESP Tunnel Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Tunnel Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1,
+						.stats = 1, .esn = 1 },
+			},
+			.ol_flags = 0
+		},
+		{ /* IPsec Inline Crypto ESP Transport Egress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 },
+			},
+			.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+		},
+		{ /* IPsec Inline Crypto ESP Transport Ingress */
+			.action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+				.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+				.options = { .udp_encap = 1, .stats = 1,
+						.esn = 1 }
+			},
+			.ol_flags = 0
+		},
+		{
+			.action = RTE_SECURITY_ACTION_TYPE_NONE
+		}
+	};
+
+	/**
+	 * Update the security capabilities struct with the runtime discovered
+	 * crypto capabilities, except for the last element of the array,
+	 * which is the null termination
+	 */
+	for (i = 0; i < ((sizeof(iavf_security_capabilities) /
+			sizeof(iavf_security_capabilities[0])) - 1); i++) {
+		iavf_security_capabilities[i].crypto_capabilities =
+			iavf_sctx->crypto_capabilities;
+	}
+
+	return iavf_security_capabilities;
+}
+
+static struct rte_security_ops iavf_ipsec_crypto_ops = {
+	.session_get_size		= iavf_ipsec_crypto_session_size_get,
+	.session_create			= iavf_ipsec_crypto_session_create,
+	.session_update			= iavf_ipsec_crypto_session_update,
+	.session_stats_get		= iavf_ipsec_crypto_session_stats_get,
+	.session_destroy		= iavf_ipsec_crypto_session_destroy,
+	.set_pkt_metadata		= iavf_ipsec_crypto_pkt_metadata_set,
+	.get_userdata			= NULL,
+	.capabilities_get		= iavf_ipsec_crypto_capabilities_get,
+};
+
+int
+iavf_security_ctx_create(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx;
+
+	sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
+	if (sctx == NULL)
+		return -ENOMEM;
+
+	sctx->device = adapter->vf.eth_dev;
+	sctx->ops = &iavf_ipsec_crypto_ops;
+	sctx->sess_cnt = 0;
+
+	adapter->vf.eth_dev->security_ctx = sctx;
+
+	if (adapter->security_ctx == NULL) {
+		adapter->security_ctx = rte_malloc("iavf_security_ctx",
+				sizeof(struct iavf_security_ctx), 0);
+		if (adapter->security_ctx == NULL)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int
+iavf_security_init(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+	struct rte_mbuf_dynfield pkt_md_dynfield = {
+		.name = "iavf_ipsec_crypto_pkt_metadata",
+		.size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
+		.align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
+	};
+	struct virtchnl_ipsec_cap capabilities;
+	int rc;
+
+	iavf_sctx->adapter = adapter;
+
+	iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
+	if (iavf_sctx->pkt_md_offset < 0)
+		return iavf_sctx->pkt_md_offset;
+
+	/* Get device capabilities from Inline IPsec driver over PF-VF comms */
+	rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
+	if (rc)
+		return rc;
+
+	return	iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
+			&capabilities);
+}
+
+int
+iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
+{
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	return iavf_sctx->pkt_md_offset;
+}
+
+int
+iavf_security_ctx_destroy(struct iavf_adapter *adapter)
+{
+	struct rte_security_ctx *sctx  = adapter->vf.eth_dev->security_ctx;
+	struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
+
+	if (iavf_sctx == NULL)
+		return -ENODEV;
+
+	/* TODO: Add resources cleanup */
+
+	/* free and reset security data structures */
+	rte_free(iavf_sctx);
+	rte_free(sctx);
+
+	adapter->security_ctx = NULL;
+	adapter->vf.eth_dev->security_ctx = NULL;
+
+	return 0;
+}
+
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
+{
+	struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
+
+	/** Capability check for IPsec Crypto */
+	if (resources && (resources->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
+		return true;
+
+	return false;
+}
+
+#define IAVF_IPSEC_INSET_ESP (\
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_AH (\
+	IAVF_INSET_AH_SPI)
+
+#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
+	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+	IAVF_INSET_ESP_SPI)
+
+#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
+	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+	IAVF_INSET_ESP_SPI)
+
+enum iavf_ipsec_flow_pt_type {
+	IAVF_PATTERN_ESP = 1,
+	IAVF_PATTERN_AH,
+	IAVF_PATTERN_UDP_ESP,
+};
+enum iavf_ipsec_flow_pt_ip_ver {
+	IAVF_PATTERN_IPV4 = 1,
+	IAVF_PATTERN_IPV6,
+};
+
+#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
+#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
+#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
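+/*
+ * The pattern meta value packs the pattern type in the low nibble and the
+ * IP version in the nibble above it, e.g. IAVF_PATTERN(IAVF_PATTERN_ESP,
+ * IAVF_PATTERN_IPV4) encodes to 0x11.
+ */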
+
+static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
+	{iavf_pattern_eth_ipv4_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_esp,	IAVF_IPSEC_INSET_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_ah,	IAVF_IPSEC_INSET_AH,
+			IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
+	{iavf_pattern_eth_ipv4_udp_esp,	IAVF_IPSEC_INSET_IPV4_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
+	{iavf_pattern_eth_ipv6_udp_esp,	IAVF_IPSEC_INSET_IPV6_NATT_ESP,
+			IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
+};
+
+struct iavf_ipsec_flow_item {
+	uint64_t id;
+	uint8_t is_ipv4;
+	uint32_t spi;
+	struct rte_ether_hdr eth_hdr;
+	union {
+		struct rte_ipv4_hdr ipv4_hdr;
+		struct rte_ipv6_hdr ipv6_hdr;
+	};
+	struct rte_udp_hdr udp_hdr;
+};
+
+static void
+parse_eth_item(const struct rte_flow_item_eth *item,
+		struct rte_ether_hdr *eth)
+{
+	memcpy(eth->src_addr.addr_bytes,
+			item->src.addr_bytes, sizeof(eth->src_addr));
+	memcpy(eth->dst_addr.addr_bytes,
+			item->dst.addr_bytes, sizeof(eth->dst_addr));
+}
+
+static void
+parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
+		struct rte_ipv4_hdr *ipv4)
+{
+	ipv4->src_addr = item->hdr.src_addr;
+	ipv4->dst_addr = item->hdr.dst_addr;
+}
+
+static void
+parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
+		struct rte_ipv6_hdr *ipv6)
+{
+	memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
+	memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
+}
+
+static void
+parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
+{
+	udp->dst_port = item->hdr.dst_port;
+	udp->src_port = item->hdr.src_port;
+}
+
+static int
+has_security_action(const struct rte_flow_action actions[],
+	const void **session)
+{
+	/* only {SECURITY; END} supported */
+	if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
+		actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
+		*session = actions[0].conf;
+		return true;
+	}
+	return false;
+}
+
+static struct iavf_ipsec_flow_item *
+iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t type)
+{
+	const void *session;
+	struct iavf_ipsec_flow_item
+		*ipsec_flow = rte_malloc("security-flow-rule",
+		sizeof(struct iavf_ipsec_flow_item), 0);
+	enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
+	enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
+
+	if (ipsec_flow == NULL)
+		return NULL;
+
+	ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
+
+	if (pattern[0].spec)
+		parse_eth_item((const struct rte_flow_item_eth *)
+				pattern[0].spec, &ipsec_flow->eth_hdr);
+
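+	/*
+	 * Items are parsed positionally: item 0 is ETH, item 1 is IPv4/IPv6,
+	 * item 2 is ESP/AH, or UDP at item 2 with ESP at item 3 for NAT-T.
+	 */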
+	switch (p_type) {
+	case IAVF_PATTERN_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[2].spec)->hdr.spi;
+		break;
+	case IAVF_PATTERN_AH:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		ipsec_flow->spi =
+			((const struct rte_flow_item_ah *)
+					pattern[2].spec)->spi;
+		break;
+	case IAVF_PATTERN_UDP_ESP:
+		if (ipsec_flow->is_ipv4) {
+			parse_ipv4_item((const struct rte_flow_item_ipv4 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv4_hdr);
+		} else {
+			parse_ipv6_item((const struct rte_flow_item_ipv6 *)
+					pattern[1].spec,
+					&ipsec_flow->ipv6_hdr);
+		}
+		parse_udp_item((const struct rte_flow_item_udp *)
+				pattern[2].spec,
+			&ipsec_flow->udp_hdr);
+		ipsec_flow->spi =
+			((const struct rte_flow_item_esp *)
+					pattern[3].spec)->hdr.spi;
+		break;
+	default:
+		goto flow_cleanup;
+	}
+
+	if (!has_security_action(actions, &session))
+		goto flow_cleanup;
+
+	if (!iavf_ipsec_crypto_action_valid(ethdev, session,
+			ipsec_flow->spi))
+		goto flow_cleanup;
+
+	return ipsec_flow;
+
+flow_cleanup:
+	rte_free(ipsec_flow);
+	return NULL;
+}
+
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser;
+
+static int
+iavf_ipsec_flow_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
+		parser = &iavf_ipsec_flow_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
+}
+
+static int
+iavf_ipsec_flow_create(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		void *meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = meta;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	if (ipsec_flow->is_ipv4) {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			1,
+			ipsec_flow->ipv4_hdr.dst_addr,
+			NULL,
+			0);
+	} else {
+		ipsec_flow->id =
+			iavf_ipsec_crypto_inbound_security_policy_add(ad,
+			ipsec_flow->spi,
+			0,
+			0,
+			ipsec_flow->ipv6_hdr.dst_addr,
+			0);
+	}
+
+	if (ipsec_flow->id < 1) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				"Failed to add SA.");
+		return -rte_errno;
+	}
+
+	flow->rule = ipsec_flow;
+
+	return 0;
+}
+
+static int
+iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
+		struct rte_flow *flow,
+		struct rte_flow_error *error)
+{
+	struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
+	if (!ipsec_flow) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"NULL rule.");
+		return -rte_errno;
+	}
+
+	iavf_ipsec_crypto_security_policy_delete(ad,
+			ipsec_flow->is_ipv4, ipsec_flow->id);
+	rte_free(ipsec_flow);
+	return 0;
+}
+
+static struct iavf_flow_engine iavf_ipsec_flow_engine = {
+	.init = iavf_ipsec_flow_init,
+	.uninit = iavf_ipsec_flow_uninit,
+	.create = iavf_ipsec_flow_create,
+	.destroy = iavf_ipsec_flow_destroy,
+	.type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
+};
+
+static int
+iavf_ipsec_flow_parse(struct iavf_adapter *ad,
+		       struct iavf_pattern_match_item *array,
+		       uint32_t array_len,
+		       const struct rte_flow_item pattern[],
+		       const struct rte_flow_action actions[],
+		       void **meta,
+		       struct rte_flow_error *error)
+{
+	struct iavf_pattern_match_item *item = NULL;
+	int ret = -1;
+
+	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+	if (item && item->meta) {
+		uint32_t type = (uint64_t)(item->meta);
+		struct iavf_ipsec_flow_item *fi =
+				iavf_ipsec_flow_item_parse(ad->vf.eth_dev,
+						pattern, actions, type);
+		if (fi && meta) {
+			*meta = fi;
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static struct iavf_flow_parser iavf_ipsec_flow_parser = {
+	.engine = &iavf_ipsec_flow_engine,
+	.array = iavf_ipsec_flow_pattern,
+	.array_len = RTE_DIM(iavf_ipsec_flow_pattern),
+	.parse_pattern_action = iavf_ipsec_flow_parse,
+	.stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
+};
+
+RTE_INIT(iavf_ipsec_flow_engine_register)
+{
+	iavf_register_flow_engine(&iavf_ipsec_flow_engine);
+}
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
new file mode 100644
index 0000000000..4e4c8798ec
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_H_
+#define _IAVF_IPSEC_CRYPTO_H_
+
+#include <rte_security.h>
+
+#include "iavf.h"
+
+
+
+struct iavf_tx_ipsec_desc {
+	union {
+		struct {
+			__le64 qw0;
+			__le64 qw1;
+		};
+		struct {
+			__le16 l4payload_length;
+			__le32 esn;
+			__le16 trailer_length;
+			u8 type:4;
+			u8 rsv:1;
+			u8 udp:1;
+			u8 ivlen:2;
+			u8 next_header;
+			__le16 ipv6_ext_hdr_length;
+			__le32 said;
+		} __rte_packed;
+	};
+} __rte_packed;
+
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
+#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
+#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
+#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
+			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
+#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
+#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
+#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
+
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
+#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
+			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
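+
+/*
+ * The shift/mask defines above mirror the bit-field layout of
+ * struct iavf_tx_ipsec_desc and are used when qw0/qw1 are composed
+ * directly (see iavf_fill_ipsec_desc()).
+ */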
+
+/* Initialization Vector Length type */
+enum iavf_ipsec_iv_len {
+	IAVF_IPSEC_IV_LEN_NONE,		/* No IV */
+	IAVF_IPSEC_IV_LEN_DW,		/* 4B IV */
+	IAVF_IPSEC_IV_LEN_DDW,		/* 8B IV */
+	IAVF_IPSEC_IV_LEN_QDW,		/* 16B IV */
+};
+
+
+/* IPsec Crypto Packet Metadata offload flags */
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN		(0x1 << 0)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN			(0x1 << 1)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS	(0x1 << 2)
+#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT			(0x1 << 3)
+
+/**
+ * Packet metadata data structure used to hold parameters required by the iAVF
+ * transmit data path. Parameters are set per packet for a session by
+ * calling the rte_security_set_pkt_metadata() API.
+ */
+struct iavf_ipsec_crypto_pkt_metadata {
+	uint32_t sa_idx;                /* SA hardware index (20b/4B) */
+
+	uint8_t ol_flags;		/* flags (1B) */
+	uint8_t len_iv;			/* IV length (2b/1B) */
+	uint8_t ctx_desc_ipsec_params;	/* IPsec params for ctx desc (7b/1B) */
+	uint8_t esp_trailer_len;	/* ESP trailer length (6b/1B) */
+
+	uint16_t l4_payload_len;	/* L4 payload length */
+	uint8_t ipv6_ext_hdrs_len;	/* IPv6 extension headers len (5b/1B) */
+	uint8_t next_proto;		/* Next Protocol (8b/1B) */
+
+	uint32_t esn;		        /* Extended Sequence Number (32b/4B) */
+} __rte_packed;
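+
+/*
+ * Illustrative usage (application side, not prescribed by this header):
+ * once an inline crypto session is created, the metadata template is copied
+ * into the mbuf dynamic field by calling
+ * rte_security_set_pkt_metadata(ctx, session, mbuf, &sqn) before transmit,
+ * and the mbuf is flagged with RTE_MBUF_F_TX_SEC_OFFLOAD.
+ */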
+
+/**
+ * Inline IPsec Crypto offload is supported
+ */
+int
+iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
+
+/**
+ * Create security context
+ */
+int iavf_security_ctx_create(struct iavf_adapter *adapter);
+
+/**
+ * Initialize security context and discover device capabilities
+ */
+int iavf_security_init(struct iavf_adapter *adapter);
+
+/**
+ * Set security capabilities
+ */
+int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
+		*iavf_sctx, struct virtchnl_ipsec_cap *virtchl_capabilities);
+
+
+int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
+
+/**
+ * Destroy security context
+ */
+int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
+
+/**
+ * Verify that the inline IPsec Crypto action is valid for this device
+ */
+uint32_t
+iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
+	const struct rte_security_session *session, uint32_t spi);
+
+/**
+ * Add inbound security policy rule to hardware
+ */
+int
+iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
+	uint32_t esp_spi,
+	uint8_t is_v4,
+	rte_be32_t v4_dst_addr,
+	uint8_t *v6_dst_addr,
+	uint8_t drop);
+
+/**
+ * Delete inbound security policy rule from hardware
+ */
+int
+iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
+	uint8_t is_v4, uint32_t flow_id);
+
+#endif /* _IAVF_IPSEC_CRYPTO_H_ */
diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
new file mode 100644
index 0000000000..70ce8dd638
--- /dev/null
+++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
+
+static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
+	{	/* SHA1 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 20,
+					.max = 20,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 48,
+					.max = 48,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 128,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 64,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES XCBC MAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = { 0 },
+				.iv_size = { 0 }
+			}, }
+		}, }
+	},
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* ChaCha20-Poly1305 */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+				.block_size = 16,
+				.key_size = {
+					.min = 32,
+					.max = 32,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES CCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_CCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 240,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* NULL (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = { 0 }
+			}, },
+		}, },
+	},
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+	{	/* 3DES CBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{
+		.op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	}
+};
+
+
+#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 9663e6514c..1b0b869239 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -27,6 +27,7 @@
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
+#include "iavf_ipsec_crypto.h"
 #include "rte_pmd_iavf.h"
 
 /* Offset of mbuf dynamic field for protocol extraction's metadata */
@@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 uint8_t
 iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
@@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
 		[IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
 		[IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
 		[IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
+		[IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
+				IAVF_RXDID_COMMS_IPSEC_CRYPTO,
 	};
 
 	return flex_type < RTE_DIM(rxdid_map) ?
@@ -508,6 +512,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
 		rxq->rxd_to_pkt_fields =
 			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
 		break;
+	case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
+		rxq->xtr_ol_flag =
+			rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
+		rxq->rxd_to_pkt_fields =
+			iavf_rxd_to_pkt_fields_by_comms_aux_v2;
+		break;
 	case IAVF_RXDID_COMMS_OVS_1:
 		rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
 		break;
@@ -692,6 +702,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		       const struct rte_eth_txconf *tx_conf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_tx_queue *txq;
@@ -736,9 +748,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+	if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
 		struct virtchnl_vlan_supported_caps *insertion_support =
-			&vf->vlan_v2_caps.offloads.insertion_support;
+			&adapter->vf.vlan_v2_caps.offloads.insertion_support;
 		uint32_t insertion_cap;
 
 		if (insertion_support->outer)
@@ -762,6 +774,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
+	if (iavf_ipsec_crypto_supported(adapter))
+		txq->ipsec_crypto_pkt_md_offset =
+			iavf_security_get_pkt_md_offset(adapter);
+
 	/* Allocate software ring */
 	txq->sw_ring =
 		rte_zmalloc_socket("iavf tx sw ring",
@@ -1084,6 +1100,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
 #endif
 }
 
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
+
+	mb->dynfield1[0] = desc->ipsec_said &
+			 IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
+}
+
+static inline void
+iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp,
+			  struct iavf_ipsec_crypto_stats *stats)
+{
+	uint16_t status1 = rte_le_to_cpu_16(rxdp->wb.status_error1);
+
+	if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
+		uint16_t ipsec_status;
+
+		mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
+
+		ipsec_status = status1 &
+			IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
+
+		if (unlikely(ipsec_status !=
+			IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
+			mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
+
+			switch (ipsec_status) {
+			case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
+				stats->ierrors.sad_miss++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
+				stats->ierrors.not_processed++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
+				stats->ierrors.icv_check++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
+				stats->ierrors.ipsec_length++;
+				break;
+			case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
+				stats->ierrors.misc++;
+				break;
+			}
+
+			stats->ierrors.count++;
+			return;
+		}
+
+		stats->icount++;
+		stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
+
+		if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
+			ipsec_status !=
+				IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
+			iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
+	}
+}
+
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
@@ -1402,6 +1482,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 		rxm->ol_flags |= pkt_flags;
@@ -1544,6 +1626,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
 		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
+				&rxq->stats.ipsec_crypto);
 		rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
@@ -1782,6 +1866,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
 				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
 			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
+				&rxq->stats.ipsec_crypto);
 			rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
@@ -2094,6 +2180,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
 	*field |= cmd;
 }
 
+static inline void
+iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+{
+	uint64_t ipsec_field =
+		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+
+	*field |= ipsec_field;
+}
+
+
 static inline void
 iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 		const struct rte_mbuf *m)
@@ -2127,15 +2225,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 
 static inline uint16_t
 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
-	struct rte_mbuf *m)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
 	uint64_t segmentation_field = 0;
 	uint64_t total_length = 0;
 
-	total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
+	if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+		total_length = ipsec_md->l4_payload_len;
+	} else {
+		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
-	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
-		total_length -= m->outer_l3_len;
+		if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+			total_length -= m->outer_l3_len;
+	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
 	if (!m->l4_len || !m->tso_segsz)
@@ -2164,7 +2266,8 @@ struct iavf_tx_context_desc_qws {
 
 static inline void
 iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
-	struct rte_mbuf *m, uint16_t *tlen)
+	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
+	uint16_t *tlen)
 {
 	volatile struct iavf_tx_context_desc_qws *desc_qws =
 			(volatile struct iavf_tx_context_desc_qws *)desc;
@@ -2176,8 +2279,13 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 
 	/* fill segmentation field */
 	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
+		/* fill IPsec field */
+		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
+			iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
+				ipsec_md);
+
 		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
-				m);
+				m, ipsec_md);
 	}
 
 	/* fill tunnelling field */
@@ -2191,6 +2299,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
 }
 
 
+static inline void
+iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
+	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
+{
+	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
+		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
+		((uint64_t)md->esp_trailer_len <<
+				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
+
+	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
+		((uint64_t)md->next_proto <<
+				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
+		((uint64_t)(md->len_iv & 0x3) <<
+				IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
+		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+				1ULL : 0ULL) <<
+				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
+		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+
+	/**
+	 * TODO: Pre-calculate this in the Session initialization
+	 *
+	 * Calculate IPsec length required in data descriptor func when TSO
+	 * offload is enabled
+	 */
+	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
+			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
+			sizeof(struct rte_udp_hdr) : 0);
+}
+
 static inline void
 iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		struct rte_mbuf *m)
@@ -2303,6 +2443,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
 }
 
 
+static struct iavf_ipsec_crypto_pkt_metadata *
+iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
+		struct rte_mbuf *m)
+{
+	if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
+		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
+				struct iavf_ipsec_crypto_pkt_metadata *);
+
+	return NULL;
+}
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -2331,7 +2482,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	for (idx = 0; idx < nb_pkts; idx++) {
 		volatile struct iavf_tx_desc *ddesc;
-		uint16_t nb_desc_ctx;
+		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
+
+		uint16_t nb_desc_ctx, nb_desc_ipsec;
 		uint16_t nb_desc_data, nb_desc_required;
 		uint16_t tlen = 0, ipseclen = 0;
 		uint64_t ddesc_template = 0;
@@ -2341,17 +2494,24 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
+		/**
+		 * Get metadata for ipsec crypto from mbuf dynamic fields if
+		 * security offload is specified.
+		 */
+		ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);
+
 		nb_desc_data = mb->nb_segs;
 		nb_desc_ctx = !!(mb->ol_flags &
 			(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
 					RTE_MBUF_F_TX_TUNNEL_MASK));
+		nb_desc_ipsec = !!(mb->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
 
 		/**
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx;
+		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2402,7 +2562,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txe->mbuf = NULL;
 			}
 
-			iavf_fill_context_desc(ctx_desc, mb, &tlen);
+			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
 			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
 
 			txe->last_id = desc_idx_last;
@@ -2410,7 +2570,27 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txe = txn;
 			}
 
+		if (nb_desc_ipsec) {
+			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
+				(volatile struct iavf_tx_ipsec_desc *)
+					&txr[desc_idx];
+
+			txn = &txe_ring[txe->next_id];
+			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
+			if (txe->mbuf) {
+				rte_pktmbuf_free_seg(txe->mbuf);
+				txe->mbuf = NULL;
+			}
+
+			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
+
+			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
+		}
 
 		mb_seg = mb;
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 1da1278452..b88c81f8f6 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -25,7 +25,8 @@
 
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
-		RTE_ETH_TX_OFFLOAD_TCP_TSO)
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |		 \
+		RTE_ETH_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
 		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |		 \
@@ -36,10 +37,10 @@
 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
 
 #define IAVF_RX_VECTOR_OFFLOAD (				 \
-		RTE_ETH_RX_OFFLOAD_CHECKSUM |		 \
-		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		 \
-		RTE_ETH_RX_OFFLOAD_VLAN |		 \
-		RTE_ETH_RX_OFFLOAD_RSS_HASH)
+		DEV_RX_OFFLOAD_CHECKSUM |		 \
+		DEV_RX_OFFLOAD_SCTP_CKSUM |		 \
+		DEV_RX_OFFLOAD_VLAN |		 \
+		DEV_RX_OFFLOAD_RSS_HASH)
 
 #define IAVF_VECTOR_PATH 0
 #define IAVF_VECTOR_OFFLOAD_PATH 1
@@ -47,23 +48,26 @@
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          88
+#define IAVF_MIN_TSO_MSS          256
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
 
-#define IAVF_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM |		 \
+#define IAVF_TX_CKSUM_OFFLOAD_MASK (		 \
+		RTE_MBUF_F_TX_IP_CKSUM |		 \
 		RTE_MBUF_F_TX_L4_MASK |		 \
 		RTE_MBUF_F_TX_TCP_SEG)
 
-#define IAVF_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 |		 \
+#define IAVF_TX_OFFLOAD_MASK (  \
+		RTE_MBUF_F_TX_OUTER_IPV6 |		 \
 		RTE_MBUF_F_TX_OUTER_IPV4 |		 \
 		RTE_MBUF_F_TX_IPV6 |			 \
 		RTE_MBUF_F_TX_IPV4 |			 \
 		RTE_MBUF_F_TX_VLAN |		 \
 		RTE_MBUF_F_TX_IP_CKSUM |		 \
 		RTE_MBUF_F_TX_L4_MASK |		 \
-		RTE_MBUF_F_TX_TCP_SEG)
+		RTE_MBUF_F_TX_TCP_SEG |		 \
+		RTE_MBUF_F_TX_SEC_OFFLOAD)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
@@ -161,6 +165,24 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_rx_queue_stats {
+	uint64_t reserved;
+	struct iavf_ipsec_crypto_stats ipsec_crypto;
+};
+
 /* Structure associated with each Rx queue. */
 struct iavf_rx_queue {
 	struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
@@ -209,6 +231,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
 				/* handle flexible descriptor by RXDID */
+	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
 };
 
@@ -243,6 +266,7 @@ struct iavf_tx_queue {
 	uint64_t offloads;
 	uint16_t next_dd;              /* next to set RS, for VPMD */
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
+	uint16_t ipsec_crypto_pkt_md_offset;
 
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
@@ -345,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
 	} flex_ts;
 };
 
+/* Rx Flex Descriptor
+ * RxDID Profile ID 24 Inline IPsec
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: Inline IPsec SAID lower 16-bits
+ * Flex-field 5: Inline IPsec SAID upper 16-bits
+ */
+struct iavf_32b_rx_flex_desc_comms_ipsec {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	__le32 ipsec_said;
+};
+
 /* Receive Flex Descriptor profile IDs: There are a total
  * of 64 profiles where profile IDs 0/1 are for legacy; and
  * profiles 2-63 are flex profiles that can be programmed
@@ -364,6 +422,7 @@ enum iavf_rxdid {
 	IAVF_RXDID_COMMS_AUX_TCP	= 21,
 	IAVF_RXDID_COMMS_OVS_1		= 22,
 	IAVF_RXDID_COMMS_OVS_2		= 23,
+	IAVF_RXDID_COMMS_IPSEC_CRYPTO	= 24,
 	IAVF_RXDID_COMMS_AUX_IP_OFFSET	= 25,
 	IAVF_RXDID_LAST			= 63,
 };
@@ -391,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
 
 enum iavf_rx_flex_desc_status_error_1_bits {
 	/* Note: These are predefined bit offsets */
-	IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
-	IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
-	IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+	/* Bits 3:0 are reserved for inline ipsec status */
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
+	IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
+	IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
 	/* [10:6] reserved */
 	IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
 	IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
@@ -403,6 +466,23 @@ enum iavf_rx_flex_desc_status_error_1_bits {
 	IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
 };
 
+#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (		\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |	\
+	BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
+
+enum iavf_rx_flex_desc_ipsec_crypto_status {
+	IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
+	IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
+	IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
+	IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
+	IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
+	/* Reserved */
+	IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
+};
+
+
 
 #define IAVF_TXD_DATA_QW1_DTYPE_SHIFT	(0)
 #define IAVF_TXD_DATA_QW1_DTYPE_MASK	(0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
@@ -670,6 +750,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 	case IAVF_TX_DESC_DTYPE_CONTEXT:
 		name = "Tx_context_desc";
 		break;
+	case IAVF_TX_DESC_DTYPE_IPSEC:
+		name = "Tx_IPsec_desc";
+		break;
 	default:
 		name = "unknown_desc";
 		break;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index df15e589d4..145b059837 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1776,3 +1776,32 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
 
 	return 0;
 }
+
+
+
+int
+iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
+		uint8_t *msg, size_t msg_len,
+		uint8_t *resp_msg, size_t resp_msg_len)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
+	args.in_args = msg;
+	args.in_args_size = msg_len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 1);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command %s",
+				"OP_INLINE_IPSEC_CRYPTO");
+		return err;
+	}
+
+	memcpy(resp_msg, args.out_buffer, resp_msg_len);
+
+	return 0;
+}
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 36a82e3faa..5eb230f687 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -5,7 +5,7 @@
 cflags += ['-Wno-strict-aliasing']
 
 includes += include_directories('../../common/iavf')
-deps += ['common_iavf']
+deps += ['common_iavf', 'security', 'cryptodev']
 
 sources = files(
         'iavf_ethdev.c',
@@ -15,6 +15,7 @@ sources = files(
         'iavf_fdir.c',
         'iavf_hash.c',
         'iavf_tm.c',
+        'iavf_ipsec_crypto.c',
 )
 
 if arch_subdir == 'x86'
diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
index 3a045040f1..7426eb9be3 100644
--- a/drivers/net/iavf/rte_pmd_iavf.h
+++ b/drivers/net/iavf/rte_pmd_iavf.h
@@ -92,6 +92,7 @@ extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 
 /**
  * The mbuf dynamic field pointer for flexible descriptor's extraction metadata.
diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map
index f3efe756cf..97f0f87311 100644
--- a/drivers/net/iavf/version.map
+++ b/drivers/net/iavf/version.map
@@ -13,4 +13,7 @@ EXPERIMENTAL {
 	rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
 	rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
+
+	# added in 21.11
+	rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v13 5/7] net/iavf: add xstats support for inline IPsec crypto
  2021-10-28 16:04 ` [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (3 preceding siblings ...)
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-28 16:04   ` Radu Nicolau
  2021-10-29 19:32     ` Ferruh Yigit
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
                     ` (2 subsequent siblings)
  7 siblings, 1 reply; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 16:04 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add per-queue counters for inline IPsec crypto offload statistics.
They can be retrieved through rte_security_session_stats_get(), with
more detailed error counters exposed through the rte_ethdev xstats API.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
 drivers/net/iavf/iavf.h        | 21 ++++++++-
 drivers/net/iavf/iavf_ethdev.c | 84 ++++++++++++++++++++++++++++------
 drivers/net/iavf/iavf_rxtx.h   | 12 -----
 3 files changed, 89 insertions(+), 28 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index bac72590bc..53c99d0f0e 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -96,6 +96,25 @@ struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
 
+
+struct iavf_ipsec_crypto_stats {
+	uint64_t icount;
+	uint64_t ibytes;
+	struct {
+		uint64_t count;
+		uint64_t sad_miss;
+		uint64_t not_processed;
+		uint64_t icv_check;
+		uint64_t ipsec_length;
+		uint64_t misc;
+	} ierrors;
+};
+
+struct iavf_eth_xstats {
+	struct virtchnl_eth_stats eth_stats;
+	struct iavf_ipsec_crypto_stats ips_stats;
+};
+
 /* Structure that defines a VSI, associated with a adapter. */
 struct iavf_vsi {
 	struct iavf_adapter *adapter; /* Backreference to associated adapter */
@@ -105,7 +124,7 @@ struct iavf_vsi {
 	uint16_t max_macaddrs;   /* Maximum number of MAC addresses */
 	uint16_t base_vector;
 	uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
-	struct virtchnl_eth_stats eth_stats_offset;
+	struct iavf_eth_xstats eth_stats_offset;
 };
 
 struct rte_flow;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index dba505494f..783a10060c 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -90,6 +90,7 @@ static const uint32_t *iavf_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static int iavf_dev_stats_get(struct rte_eth_dev *dev,
 			     struct rte_eth_stats *stats);
 static int iavf_dev_stats_reset(struct rte_eth_dev *dev);
+static int iavf_dev_xstats_reset(struct rte_eth_dev *dev);
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n);
 static int iavf_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -145,21 +146,37 @@ struct rte_iavf_xstats_name_off {
 	unsigned int offset;
 };
 
+#define _OFF_OF(a) offsetof(struct iavf_eth_xstats, a)
 static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
-	{"rx_bytes", offsetof(struct iavf_eth_stats, rx_bytes)},
-	{"rx_unicast_packets", offsetof(struct iavf_eth_stats, rx_unicast)},
-	{"rx_multicast_packets", offsetof(struct iavf_eth_stats, rx_multicast)},
-	{"rx_broadcast_packets", offsetof(struct iavf_eth_stats, rx_broadcast)},
-	{"rx_dropped_packets", offsetof(struct iavf_eth_stats, rx_discards)},
+	{"rx_bytes", _OFF_OF(eth_stats.rx_bytes)},
+	{"rx_unicast_packets", _OFF_OF(eth_stats.rx_unicast)},
+	{"rx_multicast_packets", _OFF_OF(eth_stats.rx_multicast)},
+	{"rx_broadcast_packets", _OFF_OF(eth_stats.rx_broadcast)},
+	{"rx_dropped_packets", _OFF_OF(eth_stats.rx_discards)},
 	{"rx_unknown_protocol_packets", offsetof(struct iavf_eth_stats,
 		rx_unknown_protocol)},
-	{"tx_bytes", offsetof(struct iavf_eth_stats, tx_bytes)},
-	{"tx_unicast_packets", offsetof(struct iavf_eth_stats, tx_unicast)},
-	{"tx_multicast_packets", offsetof(struct iavf_eth_stats, tx_multicast)},
-	{"tx_broadcast_packets", offsetof(struct iavf_eth_stats, tx_broadcast)},
-	{"tx_dropped_packets", offsetof(struct iavf_eth_stats, tx_discards)},
-	{"tx_error_packets", offsetof(struct iavf_eth_stats, tx_errors)},
+	{"tx_bytes", _OFF_OF(eth_stats.tx_bytes)},
+	{"tx_unicast_packets", _OFF_OF(eth_stats.tx_unicast)},
+	{"tx_multicast_packets", _OFF_OF(eth_stats.tx_multicast)},
+	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
+	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
+	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+
+	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
+	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
+	{"inline_ipsec_crypto_ierrors", _OFF_OF(ips_stats.ierrors.count)},
+	{"inline_ipsec_crypto_ierrors_sad_lookup",
+			_OFF_OF(ips_stats.ierrors.sad_miss)},
+	{"inline_ipsec_crypto_ierrors_not_processed",
+			_OFF_OF(ips_stats.ierrors.not_processed)},
+	{"inline_ipsec_crypto_ierrors_icv_fail",
+			_OFF_OF(ips_stats.ierrors.icv_check)},
+	{"inline_ipsec_crypto_ierrors_length",
+			_OFF_OF(ips_stats.ierrors.ipsec_length)},
+	{"inline_ipsec_crypto_ierrors_misc",
+			_OFF_OF(ips_stats.ierrors.misc)},
 };
+#undef _OFF_OF
 
 #define IAVF_NB_XSTATS (sizeof(rte_iavf_stats_strings) / \
 		sizeof(rte_iavf_stats_strings[0]))
@@ -177,7 +194,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.stats_reset                = iavf_dev_stats_reset,
 	.xstats_get                 = iavf_dev_xstats_get,
 	.xstats_get_names           = iavf_dev_xstats_get_names,
-	.xstats_reset               = iavf_dev_stats_reset,
+	.xstats_reset               = iavf_dev_xstats_reset,
 	.promiscuous_enable         = iavf_dev_promiscuous_enable,
 	.promiscuous_disable        = iavf_dev_promiscuous_disable,
 	.allmulticast_enable        = iavf_dev_allmulticast_enable,
@@ -1527,7 +1544,7 @@ iavf_stat_update_32(uint64_t *offset, uint64_t *stat)
 static void
 iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
 {
-	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset;
+	struct virtchnl_eth_stats *oes = &vsi->eth_stats_offset.eth_stats;
 
 	iavf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes);
 	iavf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast);
@@ -1589,7 +1606,18 @@ iavf_dev_stats_reset(struct rte_eth_dev *dev)
 		return ret;
 
 	/* set stats offset base on current values */
-	vsi->eth_stats_offset = *pstats;
+	vsi->eth_stats_offset.eth_stats = *pstats;
+
+	return 0;
+}
+
+static int
+iavf_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+	iavf_dev_stats_reset(dev);
+	memset(&vf->vsi.eth_stats_offset, 0, sizeof(struct iavf_eth_xstats));
 
 	return 0;
 }
@@ -1609,6 +1637,27 @@ static int iavf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	return IAVF_NB_XSTATS;
 }
 
+static void
+iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
+		struct iavf_ipsec_crypto_stats *ips)
+{
+	uint16_t idx;
+	for (idx = 0; idx < ethdev->data->nb_rx_queues; idx++) {
+		struct iavf_rx_queue *rxq;
+		struct iavf_ipsec_crypto_stats *stats;
+		rxq = (struct iavf_rx_queue *)ethdev->data->rx_queues[idx];
+		stats = &rxq->stats.ipsec_crypto;
+		ips->icount += stats->icount;
+		ips->ibytes += stats->ibytes;
+		ips->ierrors.count += stats->ierrors.count;
+		ips->ierrors.sad_miss += stats->ierrors.sad_miss;
+		ips->ierrors.not_processed += stats->ierrors.not_processed;
+		ips->ierrors.icv_check += stats->ierrors.icv_check;
+		ips->ierrors.ipsec_length += stats->ierrors.ipsec_length;
+		ips->ierrors.misc += stats->ierrors.misc;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1619,6 +1668,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct iavf_vsi *vsi = &vf->vsi;
 	struct virtchnl_eth_stats *pstats = NULL;
+	struct iavf_eth_xstats iavf_xtats = {0};
 
 	if (n < IAVF_NB_XSTATS)
 		return IAVF_NB_XSTATS;
@@ -1631,11 +1681,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 		return 0;
 
 	iavf_update_stats(vsi, pstats);
+	iavf_xtats.eth_stats = *pstats;
+
+	if (iavf_ipsec_crypto_supported(adapter))
+		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
-		xstats[i].value = *(uint64_t *)(((char *)pstats) +
+		xstats[i].value = *(uint64_t *)(((char *)&iavf_xtats) +
 			rte_iavf_stats_strings[i].offset);
 	}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index b88c81f8f6..c7156d1daa 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -165,18 +165,6 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
-struct iavf_ipsec_crypto_stats {
-	uint64_t icount;
-	uint64_t ibytes;
-	struct {
-		uint64_t count;
-		uint64_t sad_miss;
-		uint64_t not_processed;
-		uint64_t icv_check;
-		uint64_t ipsec_length;
-		uint64_t misc;
-	} ierrors;
-};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v13 6/7] net/iavf: add watchdog for VFLR
  2021-10-28 16:04 ` [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (4 preceding siblings ...)
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-10-28 16:04   ` Radu Nicolau
  2021-11-05 11:54     ` Ferruh Yigit
  2021-10-28 16:05   ` [dpdk-dev] [PATCH v13 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
  2021-10-29  2:21   ` [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec " Zhang, Qi Z
  7 siblings, 1 reply; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 16:04 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Add a watchdog to the iAVF PMD which monitors the VFLR register. If
the device is not already in reset and a VF reset in progress is
detected, notify the user through a callback and set the device into
reset state. If the device is already in reset, poll the register for
completion of the reset.

The watchdog is disabled by default; to enable it, set
IAVF_DEV_WATCHDOG_PERIOD to a non-zero value (microseconds).

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
---
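For reference (not part of this patch), a minimal sketch of how an
application could consume the VFLR notification raised by the watchdog;
the callback registration is standard rte_ethdev API, the recovery
logic is application specific:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* Hypothetical application callback; the watchdog below raises
 * RTE_ETH_EVENT_INTR_RESET when it detects a VF reset in progress.
 */
static int
vf_reset_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	printf("port %u: VF reset detected by PMD watchdog\n",
		(unsigned int)port_id);
	/* application specific recovery, e.g. stop, reconfigure and
	 * restart the port once the reset has completed
	 */
	return 0;
}

/* registration, e.g. during application initialisation:
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
 *			vf_reset_event_cb, NULL);
 */
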
 drivers/net/iavf/iavf.h        |  5 ++
 drivers/net/iavf/iavf_ethdev.c | 94 ++++++++++++++++++++++++++++++++++
 2 files changed, 99 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 53c99d0f0e..614cf7d070 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -31,6 +31,8 @@
 
 #define IAVF_NUM_MACADDR_MAX      64
 
+#define IAVF_DEV_WATCHDOG_PERIOD     0
+
 #define IAVF_DEFAULT_RX_PTHRESH      8
 #define IAVF_DEFAULT_RX_HTHRESH      8
 #define IAVF_DEFAULT_RX_WTHRESH      0
@@ -216,6 +218,9 @@ struct iavf_info {
 	int cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
 
+	/** iAVF watchdog enable */
+	bool watchdog_enabled;
+
 	/* Event from pf */
 	bool dev_closed;
 	bool link_up;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 783a10060c..ae0f8f17f4 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -25,6 +25,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_dev.h>
+#include <rte_alarm.h>
 
 #include "iavf.h"
 #include "iavf_rxtx.h"
@@ -240,6 +241,91 @@ iavf_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
 	return 0;
 }
 
+__rte_unused
+static int
+iavf_vfr_inprogress(struct iavf_hw *hw)
+{
+	int inprogress = 0;
+
+	if ((IAVF_READ_REG(hw, IAVF_VFGEN_RSTAT) &
+		IAVF_VFGEN_RSTAT_VFR_STATE_MASK) ==
+		VIRTCHNL_VFR_INPROGRESS)
+		inprogress = 1;
+
+	if (inprogress)
+		PMD_DRV_LOG(INFO, "Watchdog detected VFR in progress");
+
+	return inprogress;
+}
+
+__rte_unused
+static void
+iavf_dev_watchdog(void *cb_arg)
+{
+	struct iavf_adapter *adapter = cb_arg;
+	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	int vfr_inprogress = 0, rc = 0;
+
+	/* check if watchdog has been disabled since last call */
+	if (!adapter->vf.watchdog_enabled)
+		return;
+
+	/* If in reset then poll vfr_inprogress register for completion */
+	if (adapter->vf.vf_reset) {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (!vfr_inprogress) {
+			PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed",
+				adapter->vf.eth_dev->data->name);
+			adapter->vf.vf_reset = false;
+		}
+	/* If not in reset then poll vfr_inprogress register for VFLR event */
+	} else {
+		vfr_inprogress = iavf_vfr_inprogress(hw);
+
+		if (vfr_inprogress) {
+			PMD_DRV_LOG(INFO,
+				"VF \"%s\" reset event detected by watchdog",
+				adapter->vf.eth_dev->data->name);
+
+			/* enter reset state with VFLR event */
+			adapter->vf.vf_reset = true;
+
+			rte_eth_dev_callback_process(adapter->vf.eth_dev,
+				RTE_ETH_EVENT_INTR_RESET, NULL);
+		}
+	}
+
+	/* re-alarm watchdog */
+	rc = rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, cb_arg);
+
+	if (rc)
+		PMD_DRV_LOG(ERR, "Failed \"%s\" to reset device watchdog alarm",
+			adapter->vf.eth_dev->data->name);
+}
+
+static void
+iavf_dev_watchdog_enable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Enabling device watchdog");
+	adapter->vf.watchdog_enabled = true;
+	if (rte_eal_alarm_set(IAVF_DEV_WATCHDOG_PERIOD,
+			&iavf_dev_watchdog, (void *)adapter))
+		PMD_DRV_LOG(ERR, "Failed to enabled device watchdog");
+#endif
+}
+
+static void
+iavf_dev_watchdog_disable(struct iavf_adapter *adapter __rte_unused)
+{
+#if (IAVF_DEV_WATCHDOG_PERIOD > 0)
+	PMD_DRV_LOG(INFO, "Disabling device watchdog");
+	adapter->vf.watchdog_enabled = false;
+#endif
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
 			struct rte_ether_addr *mc_addrs,
@@ -2466,6 +2552,11 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 
 	iavf_default_rss_disable(adapter);
 
+
+	/* Start device watchdog */
+	iavf_dev_watchdog_enable(adapter);
+
+
 	return 0;
 
 flow_init_err:
@@ -2549,6 +2640,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 	if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true))
 		vf->vf_reset = false;
 
+	/* disable watchdog */
+	iavf_dev_watchdog_disable(adapter);
+
 	return ret;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* [dpdk-dev] [PATCH v13 7/7] net/iavf: update doc with inline crypto support
  2021-10-28 16:04 ` [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (5 preceding siblings ...)
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
@ 2021-10-28 16:05   ` Radu Nicolau
  2021-10-29 13:27     ` Ferruh Yigit
  2021-10-29  2:21   ` [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec " Zhang, Qi Z
  7 siblings, 1 reply; 128+ messages in thread
From: Radu Nicolau @ 2021-10-28 16:05 UTC (permalink / raw)
  To: Jingjing Wu, Beilei Xing, Haiyue Wang
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Radu Nicolau

Update the PMD doc, feature matrix and release notes with the
new inline crypto feature.

Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
---
 doc/guides/nics/features/iavf.ini      |  2 ++
 doc/guides/nics/intel_vf.rst           | 10 ++++++++++
 doc/guides/rel_notes/release_21_11.rst |  1 +
 3 files changed, 13 insertions(+)

diff --git a/doc/guides/nics/features/iavf.ini b/doc/guides/nics/features/iavf.ini
index dd3519e1e2..01f514239e 100644
--- a/doc/guides/nics/features/iavf.ini
+++ b/doc/guides/nics/features/iavf.ini
@@ -27,6 +27,7 @@ L4 checksum offload  = P
 Packet type parsing  = Y
 Rx descriptor status = Y
 Tx descriptor status = Y
+Inline crypto        = Y
 Basic stats          = Y
 Multiprocess aware   = Y
 FreeBSD              = Y
@@ -65,3 +66,4 @@ mark                 = Y
 passthru             = Y
 queue                = Y
 rss                  = Y
+security             = Y
diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index a1e236ad75..fd235e1463 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -633,3 +633,13 @@ Windows Support
 
 *   To load NetUIO driver, follow the steps mentioned in `dpdk-kmods repository
     <https://git.dpdk.org/dpdk-kmods/tree/windows/netuio/README.rst>`_.
+
+
+Inline IPsec Support
+--------------------
+
+*   IAVF PMD supports inline crypto processing depending on the underlying
+    hardware crypto capabilities. IPsec Security Gateway Sample Application
+    supports inline IPsec processing for IAVF PMD. For more details see the
+    IPsec Security Gateway Sample Application and Security library
+    documentation.
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 1ccac87b73..9c13ceed1c 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -163,6 +163,7 @@ New Features
   * Added Intel iavf support on Windows.
   * Added IPv4 and L4 (TCP/UDP/SCTP) checksum hash support in RSS flow.
   * Added PPPoL2TPv2oUDP RSS hash based on inner IP address and TCP/UDP port.
+  * Added Intel iavf inline crypto support.
 
 * **Updated Intel ice driver.**
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec inline crypto support
  2021-10-28 16:04 ` [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec " Radu Nicolau
                     ` (6 preceding siblings ...)
  2021-10-28 16:05   ` [dpdk-dev] [PATCH v13 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
@ 2021-10-29  2:21   ` Zhang, Qi Z
  7 siblings, 0 replies; 128+ messages in thread
From: Zhang, Qi Z @ 2021-10-29  2:21 UTC (permalink / raw)
  To: Nicolau, Radu
  Cc: dev, Doherty, Declan, Sinha, Abhijit, Wu, Jingjing, Xing, Beilei,
	Richardson, Bruce, Ananyev, Konstantin



> -----Original Message-----
> From: Nicolau, Radu <radu.nicolau@intel.com>
> Sent: Friday, October 29, 2021 12:05 AM
> Cc: dev@dpdk.org; Doherty, Declan <declan.doherty@intel.com>; Sinha,
> Abhijit <abhijit.sinha@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Zhang, Qi Z <qi.z.zhang@intel.com>; Xing, Beilei <beilei.xing@intel.com>;
> Richardson, Bruce <bruce.richardson@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Nicolau, Radu <radu.nicolau@intel.com>
> Subject: [PATCH v13 0/7] iavf: add iAVF IPsec inline crypto support
> 
> Add support for inline crypto for IPsec, for ESP transport and tunnel over IPv4
> and IPv6, as well as supporting the offload for ESP over UDP, and
> in conjunction with TSO for UDP and TCP flows.
> 
> Radu Nicolau (7):
>   common/iavf: add iAVF IPsec inline crypto support
>   net/iavf: rework Tx path
>   net/iavf: add support for asynchronous virt channel messages
>   net/iavf: add iAVF IPsec inline crypto support
>   net/iavf: add xstats support for inline IPsec crypto
>   net/iavf: add watchdog for VFLR
>   net/iavf: update doc with inline crypto support
> 
>  doc/guides/nics/features/iavf.ini             |    2 +
>  doc/guides/nics/intel_vf.rst                  |   10 +
>  doc/guides/rel_notes/release_21_11.rst        |    1 +
>  drivers/common/iavf/iavf_type.h               |    1 +
>  drivers/common/iavf/virtchnl.h                |   17 +-
>  drivers/common/iavf/virtchnl_inline_ipsec.h   |  553 +++++
>  drivers/net/iavf/iavf.h                       |   61 +-
>  drivers/net/iavf/iavf_ethdev.c                |  219 +-
>  drivers/net/iavf/iavf_generic_flow.c          |   15 +
>  drivers/net/iavf/iavf_generic_flow.h          |    2 +
>  drivers/net/iavf/iavf_ipsec_crypto.c          | 1894 +++++++++++++++++
>  drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
>  .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
>  drivers/net/iavf/iavf_rxtx.c                  |  716 +++++--
>  drivers/net/iavf/iavf_rxtx.h                  |  212 +-
>  drivers/net/iavf/iavf_rxtx_vec_sse.c          |   10 +-
>  drivers/net/iavf/iavf_vchnl.c                 |  169 +-
>  drivers/net/iavf/meson.build                  |    3 +-
>  drivers/net/iavf/rte_pmd_iavf.h               |    1 +
>  drivers/net/iavf/version.map                  |    3 +
>  20 files changed, 4113 insertions(+), 319 deletions(-)  create mode 100644
> drivers/common/iavf/virtchnl_inline_ipsec.h
>  create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
>  create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
>  create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
> 
> --
> v2: small updates and fixes in the flow related section
> v3: split the huge patch and address feedback
> v4: small changes due to dependencies changes
> v5: updated the watchdog patch
> v6: rebased and updated the common section
> v7: fixed TSO issue and disabled watchdog by default
> v8: rebased to next-net-intel and added doc updates
> v9: fixed IV len for AEAD and GMAC
> v10: removed blank lines at EOF
> v11: rebased patchset
> v12: rebased patchset to RC1
> v13: fixed coding style issues
> 
> 2.25.1

Applied to dpdk-next-net-intel.

Thanks
Qi


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v13 7/7] net/iavf: update doc with inline crypto support
  2021-10-28 16:05   ` [dpdk-dev] [PATCH v13 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
@ 2021-10-29 13:27     ` Ferruh Yigit
  0 siblings, 0 replies; 128+ messages in thread
From: Ferruh Yigit @ 2021-10-29 13:27 UTC (permalink / raw)
  To: Radu Nicolau, Jingjing Wu, Beilei Xing, Haiyue Wang
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev

On 10/28/2021 5:05 PM, Radu Nicolau wrote:
> Update the PMD doc, feature matrix and release notes with the
> new inline crypto feature.
> 
> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>

Squashed the patch in next-net into the commit where the feature
is implemented, thanks.

^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v13 4/7] net/iavf: add iAVF IPsec inline crypto support
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
@ 2021-10-29 17:33     ` Ferruh Yigit
  2021-10-30 20:41     ` David Marchand
  1 sibling, 0 replies; 128+ messages in thread
From: Ferruh Yigit @ 2021-10-29 17:33 UTC (permalink / raw)
  To: Radu Nicolau, Jingjing Wu, Beilei Xing, Ray Kinsella
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev

On 10/28/2021 5:04 PM, Radu Nicolau wrote:
> Add support for inline crypto for IPsec, for ESP transport and
> tunnel over IPv4 and IPv6, as well as supporting the offload for
> ESP over UDP, and in conjunction with TSO for UDP and TCP flows.
> Implement support for rte_security packet metadata
> 
> Add definitions for IPsec descriptors, and extend offload support in
> the data and context descriptors.
> 
> Add support to virtual channel mailbox for IPsec Crypto request
> operations. IPsec Crypto requests receive an initial acknowledgment
> from physical function driver of receipt of request and then an
> asynchronous response with success/failure of request including any
> response data.
> 
> Add enhanced descriptor debugging
> 
> Refactor of scalar tx burst function to support integration of offload
> 
> Signed-off-by: Declan Doherty <declan.doherty@intel.com>
> Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
> Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
> Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>

<...>

> @@ -973,6 +984,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>   	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
>   		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
>   
> +	if (iavf_ipsec_crypto_supported(adapter)) {
> +		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
> +		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;

Should use the new macros with RTE_ prefix

<...>

> @@ -36,10 +37,10 @@
>   		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
>   
>   #define IAVF_RX_VECTOR_OFFLOAD (				 \
> -		RTE_ETH_RX_OFFLOAD_CHECKSUM |		 \
> -		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		 \
> -		RTE_ETH_RX_OFFLOAD_VLAN |		 \
> -		RTE_ETH_RX_OFFLOAD_RSS_HASH)
> +		DEV_RX_OFFLOAD_CHECKSUM |		 \
> +		DEV_RX_OFFLOAD_SCTP_CKSUM |		 \
> +		DEV_RX_OFFLOAD_VLAN |		 \
> +		DEV_RX_OFFLOAD_RSS_HASH)

And should keep the RTE_ prefix version of the macros.

Updating both of the above in next-net.

^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v13 5/7] net/iavf: add xstats support for inline IPsec crypto
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
@ 2021-10-29 19:32     ` Ferruh Yigit
  0 siblings, 0 replies; 128+ messages in thread
From: Ferruh Yigit @ 2021-10-29 19:32 UTC (permalink / raw)
  To: Radu Nicolau, Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, David Marchand

On 10/28/2021 5:04 PM, Radu Nicolau wrote:
> @@ -1619,6 +1668,7 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
>   	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
>   	struct iavf_vsi *vsi = &vf->vsi;
>   	struct virtchnl_eth_stats *pstats = NULL;
> +	struct iavf_eth_xstats iavf_xtats = {0};

Build error [1] on FreeBSD reported by David M, will fix it in next-net [2]


[1]
../drivers/net/iavf/iavf_ethdev.c:1757:39: error: suggest braces around initialization of subobject [-Werror,-Wmissing-braces]
         struct iavf_eth_xstats iavf_xtats = {0};
                                              ^
                                              {}

[2]
  -       struct iavf_eth_xstats iavf_xtats = {0};
  +       struct iavf_eth_xstats iavf_xtats = {{0}};
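
For context, a standalone illustration of the warning (hypothetical
types, not the driver code): the first member of the aggregate is
itself a struct, so clang's -Wmissing-braces wants the inner braces
spelled out.

struct inner { int a; int b; };
struct outer { struct inner in; int c; };

struct outer x = {0};   /* may warn: suggest braces around subobject */
struct outer y = {{0}}; /* explicit braces for the first (struct) member */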


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v13 3/7] net/iavf: add support for asynchronous virt channel messages
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
@ 2021-10-29 20:33     ` Ferruh Yigit
  0 siblings, 0 replies; 128+ messages in thread
From: Ferruh Yigit @ 2021-10-29 20:33 UTC (permalink / raw)
  To: Radu Nicolau, Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, David Marchand

On 10/28/2021 5:04 PM, Radu Nicolau wrote:
> @@ -339,15 +340,35 @@ _clear_cmd(struct iavf_info *vf)
>   static inline int
>   _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
>   {
> -	int ret = rte_atomic32_cmpset((volatile uint32_t *)&vf->pend_cmd,
> -		VIRTCHNL_OP_UNKNOWN, ops);
> +	enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
> +	int ret = __atomic_compare_exchange((volatile uint32_t *)&vf->pend_cmd,
> +			&op_unk, &ops,
> +			0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
>   
>   	if (!ret)
>   		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
>   
> +	__atomic_store_n(&vf->pend_cmd_count, 1, __ATOMIC_RELAXED);
> +
>   	return !ret;
>   }
>   
> +/* Check there is pending cmd in execution. If none, set new command. */
> +static inline int
> +_atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
> +{
> +	enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
> +	int ret = __atomic_compare_exchange((volatile uint32_t *)&vf->pend_cmd,
> +			&op_unk, &ops,
> +			0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
> +
> +	if (!ret)
> +		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
> +
> +	__atomic_store_n(&vf->pend_cmd_count, 2, __ATOMIC_RELAXED);
> +
> +	return !ret;
> +}

David reported build error on Windows [1], fixing in next-net [2].
@Radu can you please confirm the latest code in the next-net?



[1]
../drivers/net/iavf/iavf_rxtx_vec_avx512.c
In file included from ../drivers/net/iavf/iavf_rxtx_vec_avx512.c:5:
In file included from ..\drivers\net\iavf/iavf_rxtx_vec_common.h:11:
..\drivers\net\iavf/iavf.h:376:4: error: passing 'enum virtchnl_ops *' to parameter of type 'uint32_t *' (aka 'unsigned int *') converts between pointers to integer types with different sign [-Werror,-Wpointer-sign]
                         &op_unk, &ops,
                         ^~~~~~~
..\drivers\net\iavf/iavf.h:376:13: error: passing 'enum virtchnl_ops *' to parameter of type 'volatile uint32_t *' (aka 'volatile unsigned int *') converts between pointers to integer types with different sign [-Werror,-Wpointer-sign]
                         &op_unk, &ops,
                                  ^~~~
..\drivers\net\iavf/iavf.h:393:4: error: passing 'enum virtchnl_ops *' to parameter of type 'uint32_t *' (aka 'unsigned int *') converts between pointers to integer types with different sign [-Werror,-Wpointer-sign]
                         &op_unk, &ops,
                         ^~~~~~~
..\drivers\net\iavf/iavf.h:393:13: error: passing 'enum virtchnl_ops *' to parameter of type 'volatile uint32_t *' (aka 'volatile unsigned int *') converts between pointers to integer types with different sign [-Werror,-Wpointer-sign]
                         &op_unk, &ops,
                                  ^~~~
4 errors generated.


[2]
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 42fb66d71193..f413dbed833a 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -372,8 +372,7 @@ static inline int
  _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
  {
         enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
-       int ret = __atomic_compare_exchange((volatile uint32_t *)&vf->pend_cmd,
-                       &op_unk, &ops,
+       int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
                         0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);

         if (!ret)
@@ -389,8 +388,7 @@ static inline int
  _atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
  {
         enum virtchnl_ops op_unk = VIRTCHNL_OP_UNKNOWN;
-       int ret = __atomic_compare_exchange((volatile uint32_t *)&vf->pend_cmd,
-                       &op_unk, &ops,
+       int ret = __atomic_compare_exchange(&vf->pend_cmd, &op_unk, &ops,
                         0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);

         if (!ret)

^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v13 4/7] net/iavf: add iAVF IPsec inline crypto support
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
  2021-10-29 17:33     ` Ferruh Yigit
@ 2021-10-30 20:41     ` David Marchand
  2021-11-01 10:45       ` Ferruh Yigit
  1 sibling, 1 reply; 128+ messages in thread
From: David Marchand @ 2021-10-30 20:41 UTC (permalink / raw)
  To: Radu Nicolau, Yigit, Ferruh
  Cc: Jingjing Wu, Beilei Xing, Ray Kinsella, dev, Declan Doherty,
	abhijit.sinha, Qi Zhang, Bruce Richardson, Ananyev, Konstantin

On Thu, Oct 28, 2021 at 6:21 PM Radu Nicolau <radu.nicolau@intel.com> wrote:
> +static const struct rte_cryptodev_symmetric_capability *
> +get_capability(struct iavf_security_ctx *iavf_sctx,
> +       uint32_t algo, uint32_t type)
> +{
> +       const struct rte_cryptodev_capabilities *capability;
> +       int i = 0;
> +
> +       capability = &iavf_sctx->crypto_capabilities[i];
> +
> +       while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
> +               if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
> +                       capability->sym.xform_type == type &&
> +                       capability->sym.cipher.algo == algo)
> +                       return &capability->sym;
> +               /** try next capability */
> +               capability = &iavf_crypto_capabilities[i++];
> +       }
> +
> +       return NULL;
> +}

As of cc13af13c8e6 ("net/ngbe: support Tx done cleanup"), next-net
build is still KO for Windows:
http://mails.dpdk.org/archives/test-report/2021-October/236938.html

FAILED: drivers/libtmp_rte_net_iavf.a.p/net_iavf_iavf_ipsec_crypto.c.obj
"clang" "-Idrivers\libtmp_rte_net_iavf.a.p" "-Idrivers" "-I..\drivers"
"-Idrivers\net\iavf" "-I..\drivers\net\iavf" "-Idrivers\common\iavf"
"-I..\drivers\common\iavf" "-Ilib\ethdev" "-I..\lib\ethdev" "-I."
"-I.." "-Iconfig" "-I..\config" "-Ilib\eal\include"
"-I..\lib\eal\include" "-Ilib\eal\windows\include"
"-I..\lib\eal\windows\include" "-Ilib\eal\x86\include"
"-I..\lib\eal\x86\include" "-Ilib\eal\common" "-I..\lib\eal\common"
"-Ilib\eal" "-I..\lib\eal" "-Ilib\kvargs" "-I..\lib\kvargs"
"-Ilib\net" "-I..\lib\net" "-Ilib\mbuf" "-I..\lib\mbuf"
"-Ilib\mempool" "-I..\lib\mempool" "-Ilib\ring" "-I..\lib\ring"
"-Ilib\metrics" "-I..\lib\metrics" "-Ilib\telemetry"
"-I..\lib\telemetry" "-Ilib\meter" "-I..\lib\meter"
"-Idrivers\bus\pci" "-I..\drivers\bus\pci"
"-I..\drivers\bus\pci\windows" "-Ilib\pci" "-I..\lib\pci"
"-Idrivers\bus\vdev" "-I..\drivers\bus\vdev" "-Ilib\security"
"-I..\lib\security" "-Ilib\cryptodev" "-I..\lib\cryptodev" "-Ilib\rcu"
"-I..\lib\rcu" "-Xclang" "-fcolor-diagnostics" "-pipe"
"-D_FILE_OFFSET_BITS=64" "-Wall" "-Winvalid-pch" "-Werror" "-O3"
"-include" "rte_config.h" "-Wextra" "-Wcast-qual" "-Wdeprecated"
"-Wformat" "-Wformat-nonliteral" "-Wformat-security"
"-Wmissing-declarations" "-Wmissing-prototypes" "-Wnested-externs"
"-Wold-style-definition" "-Wpointer-arith" "-Wsign-compare"
"-Wstrict-prototypes" "-Wundef" "-Wwrite-strings"
"-Wno-address-of-packed-member" "-Wno-missing-field-initializers"
"-D_GNU_SOURCE" "-D_WIN32_WINNT=0x0A00" "-D_CRT_SECURE_NO_WARNINGS"
"-march=native" "-DALLOW_EXPERIMENTAL_API" "-DALLOW_INTERNAL_API"
"-Wno-strict-aliasing" "-DCC_AVX2_SUPPORT" "-DCC_AVX512_SUPPORT"
"-DRTE_LOG_DEFAULT_LOGTYPE=pmd.net.iavf" -MD -MQ
drivers/libtmp_rte_net_iavf.a.p/net_iavf_iavf_ipsec_crypto.c.obj -MF
"drivers\libtmp_rte_net_iavf.a.p\net_iavf_iavf_ipsec_crypto.c.obj.d"
-o drivers/libtmp_rte_net_iavf.a.p/net_iavf_iavf_ipsec_crypto.c.obj
"-c" ../drivers/net/iavf/iavf_ipsec_crypto.c
../drivers/net/iavf/iavf_ipsec_crypto.c:111:31: error: comparison of
integers of different signs: 'const enum rte_crypto_sym_xform_type'
and 'uint32_t' (aka 'unsigned int') [-Werror,-Wsign-compare]
                        capability->sym.xform_type == type &&
                        ~~~~~~~~~~~~~~~~~~~~~~~~~~ ^  ~~~~
../drivers/net/iavf/iavf_ipsec_crypto.c:112:32: error: comparison of
integers of different signs: 'const enum rte_crypto_cipher_algorithm'
and 'uint32_t' (aka 'unsigned int') [-Werror,-Wsign-compare]
                        capability->sym.cipher.algo == algo)
                        ~~~~~~~~~~~~~~~~~~~~~~~~~~~ ^  ~~~~
2 errors generated.


-- 
David Marchand


^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v13 4/7] net/iavf: add iAVF IPsec inline crypto support
  2021-10-30 20:41     ` David Marchand
@ 2021-11-01 10:45       ` Ferruh Yigit
  2021-11-01 11:36         ` Ferruh Yigit
  0 siblings, 1 reply; 128+ messages in thread
From: Ferruh Yigit @ 2021-11-01 10:45 UTC (permalink / raw)
  To: David Marchand, Radu Nicolau
  Cc: Jingjing Wu, Beilei Xing, Ray Kinsella, dev, Declan Doherty,
	abhijit.sinha, Qi Zhang, Bruce Richardson, Ananyev, Konstantin

On 10/30/2021 9:41 PM, David Marchand wrote:
> On Thu, Oct 28, 2021 at 6:21 PM Radu Nicolau <radu.nicolau@intel.com> wrote:
>> +static const struct rte_cryptodev_symmetric_capability *
>> +get_capability(struct iavf_security_ctx *iavf_sctx,
>> +       uint32_t algo, uint32_t type)
>> +{
>> +       const struct rte_cryptodev_capabilities *capability;
>> +       int i = 0;
>> +
>> +       capability = &iavf_sctx->crypto_capabilities[i];
>> +
>> +       while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
>> +               if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
>> +                       capability->sym.xform_type == type &&
>> +                       capability->sym.cipher.algo == algo)
>> +                       return &capability->sym;
>> +               /** try next capability */
>> +               capability = &iavf_crypto_capabilities[i++];
>> +       }
>> +
>> +       return NULL;
>> +}
> 
> As of cc13af13c8e6 ("net/ngbe: support Tx done cleanup"), next-net
> build is still KO for Windows:
> http://mails.dpdk.org/archives/test-report/2021-October/236938.html
> 
> FAILED: drivers/libtmp_rte_net_iavf.a.p/net_iavf_iavf_ipsec_crypto.c.obj
> "clang" "-Idrivers\libtmp_rte_net_iavf.a.p" "-Idrivers" "-I..\drivers"
> "-Idrivers\net\iavf" "-I..\drivers\net\iavf" "-Idrivers\common\iavf"
> "-I..\drivers\common\iavf" "-Ilib\ethdev" "-I..\lib\ethdev" "-I."
> "-I.." "-Iconfig" "-I..\config" "-Ilib\eal\include"
> "-I..\lib\eal\include" "-Ilib\eal\windows\include"
> "-I..\lib\eal\windows\include" "-Ilib\eal\x86\include"
> "-I..\lib\eal\x86\include" "-Ilib\eal\common" "-I..\lib\eal\common"
> "-Ilib\eal" "-I..\lib\eal" "-Ilib\kvargs" "-I..\lib\kvargs"
> "-Ilib\net" "-I..\lib\net" "-Ilib\mbuf" "-I..\lib\mbuf"
> "-Ilib\mempool" "-I..\lib\mempool" "-Ilib\ring" "-I..\lib\ring"
> "-Ilib\metrics" "-I..\lib\metrics" "-Ilib\telemetry"
> "-I..\lib\telemetry" "-Ilib\meter" "-I..\lib\meter"
> "-Idrivers\bus\pci" "-I..\drivers\bus\pci"
> "-I..\drivers\bus\pci\windows" "-Ilib\pci" "-I..\lib\pci"
> "-Idrivers\bus\vdev" "-I..\drivers\bus\vdev" "-Ilib\security"
> "-I..\lib\security" "-Ilib\cryptodev" "-I..\lib\cryptodev" "-Ilib\rcu"
> "-I..\lib\rcu" "-Xclang" "-fcolor-diagnostics" "-pipe"
> "-D_FILE_OFFSET_BITS=64" "-Wall" "-Winvalid-pch" "-Werror" "-O3"
> "-include" "rte_config.h" "-Wextra" "-Wcast-qual" "-Wdeprecated"
> "-Wformat" "-Wformat-nonliteral" "-Wformat-security"
> "-Wmissing-declarations" "-Wmissing-prototypes" "-Wnested-externs"
> "-Wold-style-definition" "-Wpointer-arith" "-Wsign-compare"
> "-Wstrict-prototypes" "-Wundef" "-Wwrite-strings"
> "-Wno-address-of-packed-member" "-Wno-missing-field-initializers"
> "-D_GNU_SOURCE" "-D_WIN32_WINNT=0x0A00" "-D_CRT_SECURE_NO_WARNINGS"
> "-march=native" "-DALLOW_EXPERIMENTAL_API" "-DALLOW_INTERNAL_API"
> "-Wno-strict-aliasing" "-DCC_AVX2_SUPPORT" "-DCC_AVX512_SUPPORT"
> "-DRTE_LOG_DEFAULT_LOGTYPE=pmd.net.iavf" -MD -MQ
> drivers/libtmp_rte_net_iavf.a.p/net_iavf_iavf_ipsec_crypto.c.obj -MF
> "drivers\libtmp_rte_net_iavf.a.p\net_iavf_iavf_ipsec_crypto.c.obj.d"
> -o drivers/libtmp_rte_net_iavf.a.p/net_iavf_iavf_ipsec_crypto.c.obj
> "-c" ../drivers/net/iavf/iavf_ipsec_crypto.c
> ../drivers/net/iavf/iavf_ipsec_crypto.c:111:31: error: comparison of
> integers of different signs: 'const enum rte_crypto_sym_xform_type'
> and 'uint32_t' (aka 'unsigned int') [-Werror,-Wsign-compare]
>                          capability->sym.xform_type == type &&
>                          ~~~~~~~~~~~~~~~~~~~~~~~~~~ ^  ~~~~
> ../drivers/net/iavf/iavf_ipsec_crypto.c:112:32: error: comparison of
> integers of different signs: 'const enum rte_crypto_cipher_algorithm'
> and 'uint32_t' (aka 'unsigned int') [-Werror,-Wsign-compare]
>                          capability->sym.cipher.algo == algo)
>                          ~~~~~~~~~~~~~~~~~~~~~~~~~~~ ^  ~~~~
> 2 errors generated.
> 
> 

Thanks for the report, I will update in next-net as following:

diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
index 19e703e6895d..935f436ac4f1 100644
--- a/drivers/net/iavf/iavf_ipsec_crypto.c
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -108,8 +108,8 @@ get_capability(struct iavf_security_ctx *iavf_sctx,
  
         while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
                 if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
-                       capability->sym.xform_type == type &&
-                       capability->sym.cipher.algo == algo)
+                       capability->sym.xform_type == (int)type &&
+                       capability->sym.cipher.algo == (int)algo)
                         return &capability->sym;
                 /** try next capability */
                 capability = &iavf_crypto_capabilities[i++];

^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v13 4/7] net/iavf: add iAVF IPsec inline crypto support
  2021-11-01 10:45       ` Ferruh Yigit
@ 2021-11-01 11:36         ` Ferruh Yigit
  2021-11-01 11:41           ` Ferruh Yigit
  0 siblings, 1 reply; 128+ messages in thread
From: Ferruh Yigit @ 2021-11-01 11:36 UTC (permalink / raw)
  To: David Marchand, Radu Nicolau
  Cc: Jingjing Wu, Beilei Xing, Ray Kinsella, dev, Declan Doherty,
	abhijit.sinha, Qi Zhang, Bruce Richardson, Ananyev, Konstantin

On 11/1/2021 10:45 AM, Ferruh Yigit wrote:
> On 10/30/2021 9:41 PM, David Marchand wrote:
>> On Thu, Oct 28, 2021 at 6:21 PM Radu Nicolau <radu.nicolau@intel.com> wrote:
>>> +static const struct rte_cryptodev_symmetric_capability *
>>> +get_capability(struct iavf_security_ctx *iavf_sctx,
>>> +       uint32_t algo, uint32_t type)
>>> +{
>>> +       const struct rte_cryptodev_capabilities *capability;
>>> +       int i = 0;
>>> +
>>> +       capability = &iavf_sctx->crypto_capabilities[i];
>>> +
>>> +       while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
>>> +               if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
>>> +                       capability->sym.xform_type == type &&
>>> +                       capability->sym.cipher.algo == algo)
>>> +                       return &capability->sym;
>>> +               /** try next capability */
>>> +               capability = &iavf_crypto_capabilities[i++];
>>> +       }
>>> +
>>> +       return NULL;
>>> +}
>>
>> As of cc13af13c8e6 ("net/ngbe: support Tx done cleanup"), next-net
>> build is still KO for Windows:
>> http://mails.dpdk.org/archives/test-report/2021-October/236938.html
>>
>> FAILED: drivers/libtmp_rte_net_iavf.a.p/net_iavf_iavf_ipsec_crypto.c.obj
>> "clang" "-Idrivers\libtmp_rte_net_iavf.a.p" "-Idrivers" "-I..\drivers"
>> "-Idrivers\net\iavf" "-I..\drivers\net\iavf" "-Idrivers\common\iavf"
>> "-I..\drivers\common\iavf" "-Ilib\ethdev" "-I..\lib\ethdev" "-I."
>> "-I.." "-Iconfig" "-I..\config" "-Ilib\eal\include"
>> "-I..\lib\eal\include" "-Ilib\eal\windows\include"
>> "-I..\lib\eal\windows\include" "-Ilib\eal\x86\include"
>> "-I..\lib\eal\x86\include" "-Ilib\eal\common" "-I..\lib\eal\common"
>> "-Ilib\eal" "-I..\lib\eal" "-Ilib\kvargs" "-I..\lib\kvargs"
>> "-Ilib\net" "-I..\lib\net" "-Ilib\mbuf" "-I..\lib\mbuf"
>> "-Ilib\mempool" "-I..\lib\mempool" "-Ilib\ring" "-I..\lib\ring"
>> "-Ilib\metrics" "-I..\lib\metrics" "-Ilib\telemetry"
>> "-I..\lib\telemetry" "-Ilib\meter" "-I..\lib\meter"
>> "-Idrivers\bus\pci" "-I..\drivers\bus\pci"
>> "-I..\drivers\bus\pci\windows" "-Ilib\pci" "-I..\lib\pci"
>> "-Idrivers\bus\vdev" "-I..\drivers\bus\vdev" "-Ilib\security"
>> "-I..\lib\security" "-Ilib\cryptodev" "-I..\lib\cryptodev" "-Ilib\rcu"
>> "-I..\lib\rcu" "-Xclang" "-fcolor-diagnostics" "-pipe"
>> "-D_FILE_OFFSET_BITS=64" "-Wall" "-Winvalid-pch" "-Werror" "-O3"
>> "-include" "rte_config.h" "-Wextra" "-Wcast-qual" "-Wdeprecated"
>> "-Wformat" "-Wformat-nonliteral" "-Wformat-security"
>> "-Wmissing-declarations" "-Wmissing-prototypes" "-Wnested-externs"
>> "-Wold-style-definition" "-Wpointer-arith" "-Wsign-compare"
>> "-Wstrict-prototypes" "-Wundef" "-Wwrite-strings"
>> "-Wno-address-of-packed-member" "-Wno-missing-field-initializers"
>> "-D_GNU_SOURCE" "-D_WIN32_WINNT=0x0A00" "-D_CRT_SECURE_NO_WARNINGS"
>> "-march=native" "-DALLOW_EXPERIMENTAL_API" "-DALLOW_INTERNAL_API"
>> "-Wno-strict-aliasing" "-DCC_AVX2_SUPPORT" "-DCC_AVX512_SUPPORT"
>> "-DRTE_LOG_DEFAULT_LOGTYPE=pmd.net.iavf" -MD -MQ
>> drivers/libtmp_rte_net_iavf.a.p/net_iavf_iavf_ipsec_crypto.c.obj -MF
>> "drivers\libtmp_rte_net_iavf.a.p\net_iavf_iavf_ipsec_crypto.c.obj.d"
>> -o drivers/libtmp_rte_net_iavf.a.p/net_iavf_iavf_ipsec_crypto.c.obj
>> "-c" ../drivers/net/iavf/iavf_ipsec_crypto.c
>> ../drivers/net/iavf/iavf_ipsec_crypto.c:111:31: error: comparison of
>> integers of different signs: 'const enum rte_crypto_sym_xform_type'
>> and 'uint32_t' (aka 'unsigned int') [-Werror,-Wsign-compare]
>>                          capability->sym.xform_type == type &&
>>                          ~~~~~~~~~~~~~~~~~~~~~~~~~~ ^  ~~~~
>> ../drivers/net/iavf/iavf_ipsec_crypto.c:112:32: error: comparison of
>> integers of different signs: 'const enum rte_crypto_cipher_algorithm'
>> and 'uint32_t' (aka 'unsigned int') [-Werror,-Wsign-compare]
>>                          capability->sym.cipher.algo == algo)
>>                          ~~~~~~~~~~~~~~~~~~~~~~~~~~~ ^  ~~~~
>> 2 errors generated.
>>
>>
> 
> Thanks for the report, I will update in next-net as following:
> 
> diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
> index 19e703e6895d..935f436ac4f1 100644
> --- a/drivers/net/iavf/iavf_ipsec_crypto.c
> +++ b/drivers/net/iavf/iavf_ipsec_crypto.c
> @@ -108,8 +108,8 @@ get_capability(struct iavf_security_ctx *iavf_sctx,
> 
>          while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
>                  if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
> -                       capability->sym.xform_type == type &&
> -                       capability->sym.cipher.algo == algo)
> +                       capability->sym.xform_type == (int)type &&
> +                       capability->sym.cipher.algo == (int)algo)
>                          return &capability->sym;
>                  /** try next capability */
>                  capability = &iavf_crypto_capabilities[i++];

Hmm, with this some other compilers are failing with same error,
it looks like compilers can't agree on the sign of the enum.

According standard, enum type is implementation specific

'
N1570 Committee Draft — April 12, 2011 ISO/IEC 9899:201x

6.7.2.2 Enumeration specifiers
4 Each enumerated type shall be compatible with char, a signed integer type, or an
unsigned integer type. The choice of type is implementation-defined,128) but shall be
capable of representing the values of all the members of the enumeration.
'

I will cast both enum and value to 'int', although it looks ugly.
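
A standalone illustration of why the warning is compiler dependent
(hypothetical types, not the driver code):

#include <stdint.h>

enum color { RED, GREEN };

static int
matches(enum color c, uint32_t wanted)
{
	/* The underlying type of 'enum color' is implementation-defined
	 * (it may be signed or unsigned), so "c == wanted" may trigger
	 * -Wsign-compare on some compilers and not on others. Casting
	 * both operands to a common type avoids the warning everywhere.
	 */
	return (int)c == (int)wanted;
}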

^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v13 4/7] net/iavf: add iAVF IPsec inline crypto support
  2021-11-01 11:36         ` Ferruh Yigit
@ 2021-11-01 11:41           ` Ferruh Yigit
  0 siblings, 0 replies; 128+ messages in thread
From: Ferruh Yigit @ 2021-11-01 11:41 UTC (permalink / raw)
  To: David Marchand, Radu Nicolau
  Cc: Jingjing Wu, Beilei Xing, Ray Kinsella, dev, Declan Doherty,
	abhijit.sinha, Qi Zhang, Bruce Richardson, Ananyev, Konstantin

On 11/1/2021 11:36 AM, Ferruh Yigit wrote:
> On 11/1/2021 10:45 AM, Ferruh Yigit wrote:
>> On 10/30/2021 9:41 PM, David Marchand wrote:
>>> On Thu, Oct 28, 2021 at 6:21 PM Radu Nicolau <radu.nicolau@intel.com> wrote:
>>>> +static const struct rte_cryptodev_symmetric_capability *
>>>> +get_capability(struct iavf_security_ctx *iavf_sctx,
>>>> +       uint32_t algo, uint32_t type)
>>>> +{
>>>> +       const struct rte_cryptodev_capabilities *capability;
>>>> +       int i = 0;
>>>> +
>>>> +       capability = &iavf_sctx->crypto_capabilities[i];
>>>> +
>>>> +       while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
>>>> +               if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
>>>> +                       capability->sym.xform_type == type &&
>>>> +                       capability->sym.cipher.algo == algo)
>>>> +                       return &capability->sym;
>>>> +               /** try next capability */
>>>> +               capability = &iavf_crypto_capabilities[i++];
>>>> +       }
>>>> +
>>>> +       return NULL;
>>>> +}
>>>
>>> As of cc13af13c8e6 ("net/ngbe: support Tx done cleanup"), next-net
>>> build is still KO for Windows:
>>> http://mails.dpdk.org/archives/test-report/2021-October/236938.html
>>>
>>> FAILED: drivers/libtmp_rte_net_iavf.a.p/net_iavf_iavf_ipsec_crypto.c.obj
>>> "clang" "-Idrivers\libtmp_rte_net_iavf.a.p" "-Idrivers" "-I..\drivers"
>>> "-Idrivers\net\iavf" "-I..\drivers\net\iavf" "-Idrivers\common\iavf"
>>> "-I..\drivers\common\iavf" "-Ilib\ethdev" "-I..\lib\ethdev" "-I."
>>> "-I.." "-Iconfig" "-I..\config" "-Ilib\eal\include"
>>> "-I..\lib\eal\include" "-Ilib\eal\windows\include"
>>> "-I..\lib\eal\windows\include" "-Ilib\eal\x86\include"
>>> "-I..\lib\eal\x86\include" "-Ilib\eal\common" "-I..\lib\eal\common"
>>> "-Ilib\eal" "-I..\lib\eal" "-Ilib\kvargs" "-I..\lib\kvargs"
>>> "-Ilib\net" "-I..\lib\net" "-Ilib\mbuf" "-I..\lib\mbuf"
>>> "-Ilib\mempool" "-I..\lib\mempool" "-Ilib\ring" "-I..\lib\ring"
>>> "-Ilib\metrics" "-I..\lib\metrics" "-Ilib\telemetry"
>>> "-I..\lib\telemetry" "-Ilib\meter" "-I..\lib\meter"
>>> "-Idrivers\bus\pci" "-I..\drivers\bus\pci"
>>> "-I..\drivers\bus\pci\windows" "-Ilib\pci" "-I..\lib\pci"
>>> "-Idrivers\bus\vdev" "-I..\drivers\bus\vdev" "-Ilib\security"
>>> "-I..\lib\security" "-Ilib\cryptodev" "-I..\lib\cryptodev" "-Ilib\rcu"
>>> "-I..\lib\rcu" "-Xclang" "-fcolor-diagnostics" "-pipe"
>>> "-D_FILE_OFFSET_BITS=64" "-Wall" "-Winvalid-pch" "-Werror" "-O3"
>>> "-include" "rte_config.h" "-Wextra" "-Wcast-qual" "-Wdeprecated"
>>> "-Wformat" "-Wformat-nonliteral" "-Wformat-security"
>>> "-Wmissing-declarations" "-Wmissing-prototypes" "-Wnested-externs"
>>> "-Wold-style-definition" "-Wpointer-arith" "-Wsign-compare"
>>> "-Wstrict-prototypes" "-Wundef" "-Wwrite-strings"
>>> "-Wno-address-of-packed-member" "-Wno-missing-field-initializers"
>>> "-D_GNU_SOURCE" "-D_WIN32_WINNT=0x0A00" "-D_CRT_SECURE_NO_WARNINGS"
>>> "-march=native" "-DALLOW_EXPERIMENTAL_API" "-DALLOW_INTERNAL_API"
>>> "-Wno-strict-aliasing" "-DCC_AVX2_SUPPORT" "-DCC_AVX512_SUPPORT"
>>> "-DRTE_LOG_DEFAULT_LOGTYPE=pmd.net.iavf" -MD -MQ
>>> drivers/libtmp_rte_net_iavf.a.p/net_iavf_iavf_ipsec_crypto.c.obj -MF
>>> "drivers\libtmp_rte_net_iavf.a.p\net_iavf_iavf_ipsec_crypto.c.obj.d"
>>> -o drivers/libtmp_rte_net_iavf.a.p/net_iavf_iavf_ipsec_crypto.c.obj
>>> "-c" ../drivers/net/iavf/iavf_ipsec_crypto.c
>>> ../drivers/net/iavf/iavf_ipsec_crypto.c:111:31: error: comparison of
>>> integers of different signs: 'const enum rte_crypto_sym_xform_type'
>>> and 'uint32_t' (aka 'unsigned int') [-Werror,-Wsign-compare]
>>>                          capability->sym.xform_type == type &&
>>>                          ~~~~~~~~~~~~~~~~~~~~~~~~~~ ^  ~~~~
>>> ../drivers/net/iavf/iavf_ipsec_crypto.c:112:32: error: comparison of
>>> integers of different signs: 'const enum rte_crypto_cipher_algorithm'
>>> and 'uint32_t' (aka 'unsigned int') [-Werror,-Wsign-compare]
>>>                          capability->sym.cipher.algo == algo)
>>>                          ~~~~~~~~~~~~~~~~~~~~~~~~~~~ ^  ~~~~
>>> 2 errors generated.
>>>
>>>
>>
>> Thanks for the report, I will update in next-net as following:
>>
>> diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
>> index 19e703e6895d..935f436ac4f1 100644
>> --- a/drivers/net/iavf/iavf_ipsec_crypto.c
>> +++ b/drivers/net/iavf/iavf_ipsec_crypto.c
>> @@ -108,8 +108,8 @@ get_capability(struct iavf_security_ctx *iavf_sctx,
>>
>>          while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
>>                  if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
>> -                       capability->sym.xform_type == type &&
>> -                       capability->sym.cipher.algo == algo)
>> +                       capability->sym.xform_type == (int)type &&
>> +                       capability->sym.cipher.algo == (int)algo)
>>                          return &capability->sym;
>>                  /** try next capability */
>>                  capability = &iavf_crypto_capabilities[i++];
> 
> Hmm, with this some other compilers are failing with same error,
> it looks like compilers can't agree on the sign of the enum.
> 
> According standard, enum type is implementation specific
> 
> '
> N1570 Committee Draft — April 12, 2011 ISO/IEC 9899:201x
> 
> 6.7.2.2 Enumeration specifiers
> 4 Each enumerated type shall be compatible with char, a signed integer type, or an
> unsigned integer type. The choice of type is implementation-defined,128) but shall be
> capable of representing the values of all the members of the enumeration.
> '
> 
> I will cast both enum and value to 'int', although it looks ugly.

Or only cast enum to 'uint32_t' as suggested by Radu.

^ permalink raw reply	[flat|nested] 128+ messages in thread

* Re: [dpdk-dev] [PATCH v13 6/7] net/iavf: add watchdog for VFLR
  2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
@ 2021-11-05 11:54     ` Ferruh Yigit
  0 siblings, 0 replies; 128+ messages in thread
From: Ferruh Yigit @ 2021-11-05 11:54 UTC (permalink / raw)
  To: Radu Nicolau, Jingjing Wu, Beilei Xing
  Cc: dev, declan.doherty, abhijit.sinha, qi.z.zhang, bruce.richardson,
	konstantin.ananyev, Thomas Monjalon

On 10/28/2021 5:04 PM, Radu Nicolau wrote:
> diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
> index 783a10060c..ae0f8f17f4 100644
> --- a/drivers/net/iavf/iavf_ethdev.c
> +++ b/drivers/net/iavf/iavf_ethdev.c
> @@ -25,6 +25,7 @@
>   #include <rte_malloc.h>
>   #include <rte_memzone.h>
>   #include <rte_dev.h>
> +#include <rte_alarm.h>

Duplicated include, 'rte_alarm.h' is already included above.

Reported by Thomas, and will be fixed in main, thanks.

^ permalink raw reply	[flat|nested] 128+ messages in thread

end of thread, other threads:[~2021-11-05 11:54 UTC | newest]

Thread overview: 128+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-09-09 14:24 [dpdk-dev] [PATCH 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-09-09 14:24 ` [dpdk-dev] [PATCH 1/4] common/iavf: " Radu Nicolau
2021-09-09 14:24 ` [dpdk-dev] [PATCH 2/4] net/iavf: " Radu Nicolau
2021-09-09 14:24 ` [dpdk-dev] [PATCH 3/4] net/iavf: Add xstats support for inline IPsec crypto Radu Nicolau
2021-09-09 14:24 ` [dpdk-dev] [PATCH 4/4] net/iavf: add watchdog for VFLR Radu Nicolau
2021-09-15 13:32 ` [dpdk-dev] [PATCH v2 0/4] iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-09-15 13:32   ` [dpdk-dev] [PATCH v2 1/4] common/iavf: " Radu Nicolau
2021-09-15 13:32   ` [dpdk-dev] [PATCH v2 2/4] net/iavf: " Radu Nicolau
2021-09-18  5:28     ` Wu, Jingjing
2021-09-20 13:44       ` Nicolau, Radu
2021-09-15 13:32   ` [dpdk-dev] [PATCH v2 3/4] net/iavf: Add xstats support for inline IPsec crypto Radu Nicolau
2021-09-15 13:32   ` [dpdk-dev] [PATCH v2 4/4] net/iavf: add watchdog for VFLR Radu Nicolau
2021-09-20 13:51 ` [dpdk-dev] [PATCH v3 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-09-20 13:51   ` [dpdk-dev] [PATCH v3 1/6] common/iavf: " Radu Nicolau
2021-09-20 13:51   ` [dpdk-dev] [PATCH v3 2/6] net/iavf: rework tx path Radu Nicolau
2021-09-20 13:51   ` [dpdk-dev] [PATCH v3 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
2021-09-20 13:52   ` [dpdk-dev] [PATCH v3 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-09-20 13:52   ` [dpdk-dev] [PATCH v3 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
2021-09-20 13:52   ` [dpdk-dev] [PATCH v3 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
2021-10-01  9:51 ` [dpdk-dev] [PATCH v4 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 1/6] common/iavf: " Radu Nicolau
2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 2/6] net/iavf: rework tx path Radu Nicolau
2021-10-04  1:24     ` Wu, Jingjing
2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
2021-10-04  1:34     ` Wu, Jingjing
2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-04  1:50     ` Wu, Jingjing
2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
2021-10-04  2:01     ` Wu, Jingjing
2021-10-01  9:51   ` [dpdk-dev] [PATCH v4 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
2021-10-04  2:15     ` Wu, Jingjing
2021-10-04 11:18       ` Nicolau, Radu
2021-10-04 14:21         ` Nicolau, Radu
2021-10-08  6:19         ` Wu, Jingjing
2021-10-08 10:09           ` Nicolau, Radu
2021-10-06  9:28 ` [dpdk-dev] [PATCH v5 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 1/6] common/iavf: " Radu Nicolau
2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 2/6] net/iavf: rework tx path Radu Nicolau
2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
2021-10-06  9:28   ` [dpdk-dev] [PATCH v5 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
2021-10-08 10:19 ` [dpdk-dev] [PATCH v6 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-08 10:19   ` [dpdk-dev] [PATCH v6 1/6] common/iavf: " Radu Nicolau
2021-10-08 10:20   ` [dpdk-dev] [PATCH v6 2/6] net/iavf: rework tx path Radu Nicolau
2021-10-08 10:20   ` [dpdk-dev] [PATCH v6 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
2021-10-08 10:20   ` [dpdk-dev] [PATCH v6 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-08 10:20   ` [dpdk-dev] [PATCH v6 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
2021-10-08 10:20   ` [dpdk-dev] [PATCH v6 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
2021-10-13 15:33 ` [dpdk-dev] [PATCH v7 0/6] iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 1/6] common/iavf: " Radu Nicolau
2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 2/6] net/iavf: rework tx path Radu Nicolau
2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 3/6] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 4/6] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 5/6] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
2021-10-13 15:33   ` [dpdk-dev] [PATCH v7 6/6] net/iavf: add watchdog for VFLR Radu Nicolau
2021-10-15 10:15 ` [dpdk-dev] [PATCH v8 0/7] iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 1/7] common/iavf: " Radu Nicolau
2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 2/7] net/iavf: rework tx path Radu Nicolau
2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
2021-10-18  5:34     ` Wu, Jingjing
2021-10-15 10:15   ` [dpdk-dev] [PATCH v8 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
2021-10-18 10:10 ` [dpdk-dev] [PATCH v9 0/7] iavf: add iAVF IPsec " Radu Nicolau
2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 1/7] common/iavf: " Radu Nicolau
2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 2/7] net/iavf: rework tx path Radu Nicolau
2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
2021-10-18 10:10   ` [dpdk-dev] [PATCH v9 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
2021-10-19  9:23 ` [dpdk-dev] [PATCH v10 0/7] iavf: add iAVF IPsec " Radu Nicolau
2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 1/7] common/iavf: " Radu Nicolau
2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 2/7] net/iavf: rework tx path Radu Nicolau
2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
2021-10-19  9:23   ` [dpdk-dev] [PATCH v10 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
2021-10-26 10:38 ` [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec " Radu Nicolau
2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 1/7] common/iavf: " Radu Nicolau
2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 2/7] net/iavf: rework tx path Radu Nicolau
2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
2021-10-26 10:38   ` [dpdk-dev] [PATCH v11 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
2021-10-26 12:30   ` [dpdk-dev] [PATCH v11 0/7] iavf: add iAVF IPsec " Zhang, Qi Z
2021-10-26 13:56 ` [dpdk-dev] [PATCH v12 " Radu Nicolau
2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 1/7] common/iavf: " Radu Nicolau
2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 2/7] net/iavf: rework tx path Radu Nicolau
2021-10-27  0:43     ` Zhang, Qi Z
2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-27  0:36     ` Zhang, Qi Z
2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
2021-10-26 13:56   ` [dpdk-dev] [PATCH v12 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
2021-10-27  0:36   ` [dpdk-dev] [PATCH v12 0/7] iavf: add iAVF IPsec " Zhang, Qi Z
2021-10-28 14:47   ` Ferruh Yigit
2021-10-28 15:52 ` [dpdk-dev] [PATCH v13 " Radu Nicolau
2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 1/7] common/iavf: " Radu Nicolau
2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 2/7] net/iavf: rework tx path Radu Nicolau
2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
2021-10-28 15:52   ` [dpdk-dev] [PATCH v13 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
2021-10-28 16:04 ` [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec " Radu Nicolau
2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 1/7] common/iavf: " Radu Nicolau
2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 2/7] net/iavf: rework Tx path Radu Nicolau
2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 3/7] net/iavf: add support for asynchronous virt channel messages Radu Nicolau
2021-10-29 20:33     ` Ferruh Yigit
2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 4/7] net/iavf: add iAVF IPsec inline crypto support Radu Nicolau
2021-10-29 17:33     ` Ferruh Yigit
2021-10-30 20:41     ` David Marchand
2021-11-01 10:45       ` Ferruh Yigit
2021-11-01 11:36         ` Ferruh Yigit
2021-11-01 11:41           ` Ferruh Yigit
2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 5/7] net/iavf: add xstats support for inline IPsec crypto Radu Nicolau
2021-10-29 19:32     ` Ferruh Yigit
2021-10-28 16:04   ` [dpdk-dev] [PATCH v13 6/7] net/iavf: add watchdog for VFLR Radu Nicolau
2021-11-05 11:54     ` Ferruh Yigit
2021-10-28 16:05   ` [dpdk-dev] [PATCH v13 7/7] net/iavf: update doc with inline crypto support Radu Nicolau
2021-10-29 13:27     ` Ferruh Yigit
2021-10-29  2:21   ` [dpdk-dev] [PATCH v13 0/7] iavf: add iAVF IPsec " Zhang, Qi Z
