DPDK patches and discussions
From: Junfeng Guo <junfeng.guo@intel.com>
To: andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com,
	jingjing.wu@intel.com, beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>,
	Xiao Wang <xiao.w.wang@intel.com>
Subject: [PATCH v8 01/14] common/idpf: introduce common library
Date: Thu, 20 Oct 2022 14:29:38 +0800
Message-ID: <20221020062951.645121-2-junfeng.guo@intel.com>
In-Reply-To: <20221020062951.645121-1-junfeng.guo@intel.com>

Introduce a common library for the IDPF (Infrastructure Data Path Function)
PMD.

Also add OS-specific implementations of the macro definitions and small
helper functions that are specific to DPDK.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
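For reviewers, a minimal sketch of how a PMD is expected to drive the
mailbox side of this library once the caller has filled in the hw struct
and run idpf_set_mac_type(). The ring sizes, the stack response buffer,
the function name and the VIRTCHNL_OP_VERSION round trip are illustrative
assumptions only and are not wired up by this patch:

/* Sketch only: bring up the default mailbox pair, send one virtchnl
 * request, poll for the CP response, then tear down again.
 */
static int example_mailbox_roundtrip(struct idpf_hw *hw)
{
	struct idpf_ctlq_size ctlq_size = {
		.asq_ring_size = 64,	/* illustrative ring sizes */
		.asq_buf_size  = IDPF_CTLQ_MAX_BUF_LEN,
		.arq_ring_size = 64,
		.arq_buf_size  = IDPF_CTLQ_MAX_BUF_LEN,
	};
	struct virtchnl_version_info ver = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};
	struct idpf_arq_event_info event = { 0 };
	u8 resp_buf[IDPF_CTLQ_MAX_BUF_LEN];
	u16 pending = 0;
	int err;

	/* creates the default TX/RX mailbox pair and fills hw->asq/hw->arq */
	err = idpf_init_hw(hw, ctlq_size);
	if (err != IDPF_SUCCESS)
		return err;

	/* post one request on the TX mailbox (asq) */
	err = idpf_send_msg_to_cp(hw, VIRTCHNL_OP_VERSION, IDPF_SUCCESS,
				  (u8 *)&ver, sizeof(ver));
	if (err != IDPF_SUCCESS)
		goto out;

	/* poll the RX mailbox (arq) for the CP response */
	event.msg_buf = resp_buf;
	event.buf_len = sizeof(resp_buf);
	err = idpf_clean_arq_element(hw, &event, &pending);

out:
	idpf_deinit_hw(hw);
	return err;
}

The same asq/arq handles feed idpf_ctlq_clean_sq() and
idpf_ctlq_post_rx_buffs() when the caller manages descriptors directly.
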
 drivers/common/idpf/idpf_alloc.h            |   22 +
 drivers/common/idpf/idpf_common.c           |  364 +++
 drivers/common/idpf/idpf_controlq.c         |  691 +++++
 drivers/common/idpf/idpf_controlq.h         |  224 ++
 drivers/common/idpf/idpf_controlq_api.h     |  234 ++
 drivers/common/idpf/idpf_controlq_setup.c   |  179 ++
 drivers/common/idpf/idpf_devids.h           |   18 +
 drivers/common/idpf/idpf_lan_pf_regs.h      |  134 +
 drivers/common/idpf/idpf_lan_txrx.h         |  428 +++
 drivers/common/idpf/idpf_lan_vf_regs.h      |  114 +
 drivers/common/idpf/idpf_osdep.h            |  374 +++
 drivers/common/idpf/idpf_prototype.h        |   45 +
 drivers/common/idpf/idpf_type.h             |  106 +
 drivers/common/idpf/meson.build             |   14 +
 drivers/common/idpf/siov_regs.h             |   47 +
 drivers/common/idpf/version.map             |   12 +
 drivers/common/idpf/virtchnl.h              | 2866 +++++++++++++++++++
 drivers/common/idpf/virtchnl2.h             | 1462 ++++++++++
 drivers/common/idpf/virtchnl2_lan_desc.h    |  606 ++++
 drivers/common/idpf/virtchnl_inline_ipsec.h |  567 ++++
 drivers/common/meson.build                  |    1 +
 21 files changed, 8508 insertions(+)
 create mode 100644 drivers/common/idpf/idpf_alloc.h
 create mode 100644 drivers/common/idpf/idpf_common.c
 create mode 100644 drivers/common/idpf/idpf_controlq.c
 create mode 100644 drivers/common/idpf/idpf_controlq.h
 create mode 100644 drivers/common/idpf/idpf_controlq_api.h
 create mode 100644 drivers/common/idpf/idpf_controlq_setup.c
 create mode 100644 drivers/common/idpf/idpf_devids.h
 create mode 100644 drivers/common/idpf/idpf_lan_pf_regs.h
 create mode 100644 drivers/common/idpf/idpf_lan_txrx.h
 create mode 100644 drivers/common/idpf/idpf_lan_vf_regs.h
 create mode 100644 drivers/common/idpf/idpf_osdep.h
 create mode 100644 drivers/common/idpf/idpf_prototype.h
 create mode 100644 drivers/common/idpf/idpf_type.h
 create mode 100644 drivers/common/idpf/meson.build
 create mode 100644 drivers/common/idpf/siov_regs.h
 create mode 100644 drivers/common/idpf/version.map
 create mode 100644 drivers/common/idpf/virtchnl.h
 create mode 100644 drivers/common/idpf/virtchnl2.h
 create mode 100644 drivers/common/idpf/virtchnl2_lan_desc.h
 create mode 100644 drivers/common/idpf/virtchnl_inline_ipsec.h

diff --git a/drivers/common/idpf/idpf_alloc.h b/drivers/common/idpf/idpf_alloc.h
new file mode 100644
index 0000000000..bc054851b3
--- /dev/null
+++ b/drivers/common/idpf/idpf_alloc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#ifndef _IDPF_ALLOC_H_
+#define _IDPF_ALLOC_H_
+
+/* Memory types */
+enum idpf_memset_type {
+	IDPF_NONDMA_MEM = 0,
+	IDPF_DMA_MEM
+};
+
+/* Memcpy types */
+enum idpf_memcpy_type {
+	IDPF_NONDMA_TO_NONDMA = 0,
+	IDPF_NONDMA_TO_DMA,
+	IDPF_DMA_TO_DMA,
+	IDPF_DMA_TO_NONDMA
+};
+
+#endif /* _IDPF_ALLOC_H_ */
diff --git a/drivers/common/idpf/idpf_common.c b/drivers/common/idpf/idpf_common.c
new file mode 100644
index 0000000000..9efb0ea37a
--- /dev/null
+++ b/drivers/common/idpf/idpf_common.c
@@ -0,0 +1,364 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#include "idpf_type.h"
+#include "idpf_prototype.h"
+#include "virtchnl.h"
+
+
+/**
+ * idpf_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ */
+int idpf_set_mac_type(struct idpf_hw *hw)
+{
+	int status = IDPF_SUCCESS;
+
+	DEBUGFUNC("Set MAC type\n");
+
+	if (hw->vendor_id == IDPF_INTEL_VENDOR_ID) {
+		switch (hw->device_id) {
+		case IDPF_DEV_ID_PF:
+			hw->mac.type = IDPF_MAC_PF;
+			break;
+		case IDPF_DEV_ID_VF:
+			hw->mac.type = IDPF_MAC_VF;
+			break;
+		default:
+			hw->mac.type = IDPF_MAC_GENERIC;
+			break;
+		}
+	} else {
+		status = IDPF_ERR_DEVICE_NOT_SUPPORTED;
+	}
+
+	DEBUGOUT2("Setting MAC type found mac: %d, returns: %d\n",
+		  hw->mac.type, status);
+	return status;
+}
+
+/**
+ *  idpf_init_hw - main initialization routine
+ *  @hw: pointer to the hardware structure
+ *  @ctlq_size: struct to pass ctlq size data
+ */
+int idpf_init_hw(struct idpf_hw *hw, struct idpf_ctlq_size ctlq_size)
+{
+	struct idpf_ctlq_create_info *q_info;
+	int status = IDPF_SUCCESS;
+	struct idpf_ctlq_info *cq = NULL;
+
+	/* Setup initial control queues */
+	q_info = (struct idpf_ctlq_create_info *)
+		 idpf_calloc(hw, 2, sizeof(struct idpf_ctlq_create_info));
+	if (!q_info)
+		return IDPF_ERR_NO_MEMORY;
+
+	q_info[0].type             = IDPF_CTLQ_TYPE_MAILBOX_TX;
+	q_info[0].buf_size         = ctlq_size.asq_buf_size;
+	q_info[0].len              = ctlq_size.asq_ring_size;
+	q_info[0].id               = -1; /* default queue */
+
+	if (hw->mac.type == IDPF_MAC_PF) {
+		q_info[0].reg.head         = PF_FW_ATQH;
+		q_info[0].reg.tail         = PF_FW_ATQT;
+		q_info[0].reg.len          = PF_FW_ATQLEN;
+		q_info[0].reg.bah          = PF_FW_ATQBAH;
+		q_info[0].reg.bal          = PF_FW_ATQBAL;
+		q_info[0].reg.len_mask     = PF_FW_ATQLEN_ATQLEN_M;
+		q_info[0].reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
+		q_info[0].reg.head_mask    = PF_FW_ATQH_ATQH_M;
+	} else {
+		q_info[0].reg.head         = VF_ATQH;
+		q_info[0].reg.tail         = VF_ATQT;
+		q_info[0].reg.len          = VF_ATQLEN;
+		q_info[0].reg.bah          = VF_ATQBAH;
+		q_info[0].reg.bal          = VF_ATQBAL;
+		q_info[0].reg.len_mask     = VF_ATQLEN_ATQLEN_M;
+		q_info[0].reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M;
+		q_info[0].reg.head_mask    = VF_ATQH_ATQH_M;
+	}
+
+	q_info[1].type             = IDPF_CTLQ_TYPE_MAILBOX_RX;
+	q_info[1].buf_size         = ctlq_size.arq_buf_size;
+	q_info[1].len              = ctlq_size.arq_ring_size;
+	q_info[1].id               = -1; /* default queue */
+
+	if (hw->mac.type == IDPF_MAC_PF) {
+		q_info[1].reg.head         = PF_FW_ARQH;
+		q_info[1].reg.tail         = PF_FW_ARQT;
+		q_info[1].reg.len          = PF_FW_ARQLEN;
+		q_info[1].reg.bah          = PF_FW_ARQBAH;
+		q_info[1].reg.bal          = PF_FW_ARQBAL;
+		q_info[1].reg.len_mask     = PF_FW_ARQLEN_ARQLEN_M;
+		q_info[1].reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
+		q_info[1].reg.head_mask    = PF_FW_ARQH_ARQH_M;
+	} else {
+		q_info[1].reg.head         = VF_ARQH;
+		q_info[1].reg.tail         = VF_ARQT;
+		q_info[1].reg.len          = VF_ARQLEN;
+		q_info[1].reg.bah          = VF_ARQBAH;
+		q_info[1].reg.bal          = VF_ARQBAL;
+		q_info[1].reg.len_mask     = VF_ARQLEN_ARQLEN_M;
+		q_info[1].reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M;
+		q_info[1].reg.head_mask    = VF_ARQH_ARQH_M;
+	}
+
+	status = idpf_ctlq_init(hw, 2, q_info);
+	if (status != IDPF_SUCCESS) {
+		/* TODO return error */
+		idpf_free(hw, q_info);
+		return status;
+	}
+
+	LIST_FOR_EACH_ENTRY(cq, &hw->cq_list_head, idpf_ctlq_info, cq_list) {
+		if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
+			hw->asq = cq;
+		else if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)
+			hw->arq = cq;
+	}
+
+	/* TODO hardcode a mac addr for now */
+	hw->mac.addr[0] = 0x00;
+	hw->mac.addr[1] = 0x00;
+	hw->mac.addr[2] = 0x00;
+	hw->mac.addr[3] = 0x00;
+	hw->mac.addr[4] = 0x03;
+	hw->mac.addr[5] = 0x14;
+
+	return IDPF_SUCCESS;
+}
+
+/**
+ * idpf_send_msg_to_cp
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to CP. By default, this message
+ * is sent asynchronously, i.e. idpf_send_msg_to_cp() does not wait for
+ * completion before returning.
+ */
+int idpf_send_msg_to_cp(struct idpf_hw *hw, enum virtchnl_ops v_opcode,
+			int v_retval, u8 *msg, u16 msglen)
+{
+	struct idpf_ctlq_msg ctlq_msg = { 0 };
+	struct idpf_dma_mem dma_mem = { 0 };
+	int status;
+
+	ctlq_msg.opcode = idpf_mbq_opc_send_msg_to_pf;
+	ctlq_msg.func_id = 0;
+	ctlq_msg.data_len = msglen;
+	ctlq_msg.cookie.mbx.chnl_retval = v_retval;
+	ctlq_msg.cookie.mbx.chnl_opcode = v_opcode;
+
+	if (msglen > 0) {
+		dma_mem.va = (struct idpf_dma_mem *)
+			  idpf_alloc_dma_mem(hw, &dma_mem, msglen);
+		if (!dma_mem.va)
+			return IDPF_ERR_NO_MEMORY;
+
+		idpf_memcpy(dma_mem.va, msg, msglen, IDPF_NONDMA_TO_DMA);
+		ctlq_msg.ctx.indirect.payload = &dma_mem;
+	}
+	status = idpf_ctlq_send(hw, hw->asq, 1, &ctlq_msg);
+
+	if (dma_mem.va)
+		idpf_free_dma_mem(hw, &dma_mem);
+
+	return status;
+}
+
+/**
+ *  idpf_asq_done - check if FW has processed the Admin Send Queue
+ *  @hw: pointer to the hw struct
+ *
+ *  Returns true if the firmware has processed all descriptors on the
+ *  admin send queue. Returns false if there are still requests pending.
+ */
+bool idpf_asq_done(struct idpf_hw *hw)
+{
+	/* AQ designers suggest use of head for better
+	 * timing reliability than DD bit
+	 */
+	return rd32(hw, hw->asq->reg.head) == hw->asq->next_to_use;
+}
+
+/**
+ * idpf_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if Queue is enabled else false.
+ */
+bool idpf_check_asq_alive(struct idpf_hw *hw)
+{
+	if (hw->asq->reg.len)
+		return !!(rd32(hw, hw->asq->reg.len) &
+			  PF_FW_ATQLEN_ATQENABLE_M);
+
+	return false;
+}
+
+/**
+ *  idpf_clean_arq_element
+ *  @hw: pointer to the hw struct
+ *  @e: event info from the receive descriptor, includes any buffers
+ *  @pending: number of events that could be left to process
+ *
+ *  This function cleans one Admin Receive Queue element and returns
+ *  the contents through e.  It can also return how many events are
+ *  left to process through 'pending'
+ */
+int idpf_clean_arq_element(struct idpf_hw *hw,
+			   struct idpf_arq_event_info *e, u16 *pending)
+{
+	struct idpf_ctlq_msg msg = { 0 };
+	int status;
+
+	*pending = 1;
+
+	status = idpf_ctlq_recv(hw->arq, pending, &msg);
+
+	/* ctlq_msg does not align to ctlq_desc, so copy relevant data here */
+	e->desc.opcode = msg.opcode;
+	e->desc.cookie_high = msg.cookie.mbx.chnl_opcode;
+	e->desc.cookie_low = msg.cookie.mbx.chnl_retval;
+	e->desc.ret_val = msg.status;
+	e->desc.datalen = msg.data_len;
+	if (msg.data_len > 0) {
+		e->buf_len = msg.data_len;
+		idpf_memcpy(e->msg_buf, msg.ctx.indirect.payload->va, msg.data_len,
+			    IDPF_DMA_TO_NONDMA);
+	}
+	return status;
+}
+
+/**
+ *  idpf_deinit_hw - shutdown routine
+ *  @hw: pointer to the hardware structure
+ */
+int idpf_deinit_hw(struct idpf_hw *hw)
+{
+	hw->asq = NULL;
+	hw->arq = NULL;
+
+	return idpf_ctlq_deinit(hw);
+}
+
+/**
+ * idpf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a RESET message to the CPF. Does not wait for response from CPF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the control queue should be shut down and (optionally) reinitialized.
+ */
+int idpf_reset(struct idpf_hw *hw)
+{
+	return idpf_send_msg_to_cp(hw, VIRTCHNL_OP_RESET_VF,
+				      IDPF_SUCCESS, NULL, 0);
+}
+
+/**
+ * idpf_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ */
+STATIC int idpf_get_set_rss_lut(struct idpf_hw *hw, u16 vsi_id,
+				bool pf_lut, u8 *lut, u16 lut_size,
+				bool set)
+{
+	/* TODO fill out command */
+	return IDPF_SUCCESS;
+}
+
+/**
+ * idpf_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ */
+int idpf_get_rss_lut(struct idpf_hw *hw, u16 vsi_id, bool pf_lut,
+		     u8 *lut, u16 lut_size)
+{
+	return idpf_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, false);
+}
+
+/**
+ * idpf_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ */
+int idpf_set_rss_lut(struct idpf_hw *hw, u16 vsi_id, bool pf_lut,
+		     u8 *lut, u16 lut_size)
+{
+	return idpf_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * idpf_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ */
+STATIC int idpf_get_set_rss_key(struct idpf_hw *hw, u16 vsi_id,
+				struct idpf_get_set_rss_key_data *key,
+				bool set)
+{
+	/* TODO fill out command */
+	return IDPF_SUCCESS;
+}
+
+/**
+ * idpf_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ */
+int idpf_get_rss_key(struct idpf_hw *hw, u16 vsi_id,
+		     struct idpf_get_set_rss_key_data *key)
+{
+	return idpf_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * idpf_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ */
+int idpf_set_rss_key(struct idpf_hw *hw, u16 vsi_id,
+		     struct idpf_get_set_rss_key_data *key)
+{
+	return idpf_get_set_rss_key(hw, vsi_id, key, true);
+}
+
+RTE_LOG_REGISTER_DEFAULT(idpf_common_logger, NOTICE);
diff --git a/drivers/common/idpf/idpf_controlq.c b/drivers/common/idpf/idpf_controlq.c
new file mode 100644
index 0000000000..da3692df7d
--- /dev/null
+++ b/drivers/common/idpf/idpf_controlq.c
@@ -0,0 +1,691 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#include "idpf_controlq.h"
+
+/**
+ * idpf_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: structs containing info for each queue to be initialized
+ */
+static void
+idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
+		     struct idpf_ctlq_create_info *q_create_info)
+{
+	/* set head and tail registers in our local struct */
+	cq->reg.head = q_create_info->reg.head;
+	cq->reg.tail = q_create_info->reg.tail;
+	cq->reg.len = q_create_info->reg.len;
+	cq->reg.bah = q_create_info->reg.bah;
+	cq->reg.bal = q_create_info->reg.bal;
+	cq->reg.len_mask = q_create_info->reg.len_mask;
+	cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+	cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * idpf_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+				bool is_rxq)
+{
+	/* Update tail to post pre-allocated buffers for rx queues */
+	if (is_rxq)
+		wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
+
+	/* For non-Mailbox control queues only TAIL needs to be set */
+	if (cq->q_id != -1)
+		return;
+
+	/* Clear Head for both send or receive */
+	wr32(hw, cq->reg.head, 0);
+
+	/* set starting point */
+	wr32(hw, cq->reg.bal, IDPF_LO_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.bah, IDPF_HI_DWORD(cq->desc_ring.pa));
+	wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+	int i = 0;
+
+	for (i = 0; i < cq->ring_size; i++) {
+		struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+		struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+		/* No buffer to post to descriptor, continue */
+		if (!bi)
+			continue;
+
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+		desc->opcode = 0;
+		desc->datalen = (__le16)CPU_TO_LE16(bi->size);
+		desc->ret_val = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(bi->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(bi->pa));
+		desc->params.indirect.param0 = 0;
+		desc->params.indirect.param1 = 0;
+	}
+}
+
+/**
+ * idpf_ctlq_shutdown - shutdown the CQ
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * The main shutdown routine for any control queue
+ */
+static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	idpf_acquire_lock(&cq->cq_lock);
+
+	if (!cq->ring_size)
+		goto shutdown_sq_out;
+
+#ifdef SIMICS_BUILD
+	wr32(hw, cq->reg.head, 0);
+	wr32(hw, cq->reg.tail, 0);
+	wr32(hw, cq->reg.len, 0);
+	wr32(hw, cq->reg.bal, 0);
+	wr32(hw, cq->reg.bah, 0);
+#endif /* SIMICS_BUILD */
+
+	/* free ring buffers and the ring itself */
+	idpf_ctlq_dealloc_ring_res(hw, cq);
+
+	/* Set ring_size to 0 to indicate uninitialized queue */
+	cq->ring_size = 0;
+
+shutdown_sq_out:
+	idpf_release_lock(&cq->cq_lock);
+	idpf_destroy_lock(&cq->cq_lock);
+}
+
+/**
+ * idpf_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq parameter will be allocated/initialized and passed back to the caller
+ * if no errors occur.
+ *
+ * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
+ */
+int idpf_ctlq_add(struct idpf_hw *hw,
+		  struct idpf_ctlq_create_info *qinfo,
+		  struct idpf_ctlq_info **cq_out)
+{
+	bool is_rxq = false;
+	int status = IDPF_SUCCESS;
+
+	if (!qinfo->len || !qinfo->buf_size ||
+	    qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
+	    qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
+		return IDPF_ERR_CFG;
+
+	*cq_out = (struct idpf_ctlq_info *)
+		idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+	if (!(*cq_out))
+		return IDPF_ERR_NO_MEMORY;
+
+	(*cq_out)->cq_type = qinfo->type;
+	(*cq_out)->q_id = qinfo->id;
+	(*cq_out)->buf_size = qinfo->buf_size;
+	(*cq_out)->ring_size = qinfo->len;
+
+	(*cq_out)->next_to_use = 0;
+	(*cq_out)->next_to_clean = 0;
+	(*cq_out)->next_to_post = (*cq_out)->ring_size - 1;
+
+	switch (qinfo->type) {
+	case IDPF_CTLQ_TYPE_MAILBOX_RX:
+		is_rxq = true;
+#ifdef __KERNEL__
+		fallthrough;
+#else
+		/* fallthrough */
+#endif /* __KERNEL__ */
+	case IDPF_CTLQ_TYPE_MAILBOX_TX:
+		status = idpf_ctlq_alloc_ring_res(hw, *cq_out);
+		break;
+	default:
+		status = IDPF_ERR_PARAM;
+		break;
+	}
+
+	if (status)
+		goto init_free_q;
+
+	if (is_rxq) {
+		idpf_ctlq_init_rxq_bufs(*cq_out);
+	} else {
+		/* Allocate the array of msg pointers for TX queues */
+		(*cq_out)->bi.tx_msg = (struct idpf_ctlq_msg **)
+			idpf_calloc(hw, qinfo->len,
+				    sizeof(struct idpf_ctlq_msg *));
+		if (!(*cq_out)->bi.tx_msg) {
+			status = IDPF_ERR_NO_MEMORY;
+			goto init_dealloc_q_mem;
+		}
+	}
+
+	idpf_ctlq_setup_regs(*cq_out, qinfo);
+
+	idpf_ctlq_init_regs(hw, *cq_out, is_rxq);
+
+	idpf_init_lock(&(*cq_out)->cq_lock);
+
+	LIST_INSERT_HEAD(&hw->cq_list_head, (*cq_out), cq_list);
+
+	return status;
+
+init_dealloc_q_mem:
+	/* free ring buffers and the ring itself */
+	idpf_ctlq_dealloc_ring_res(hw, *cq_out);
+init_free_q:
+	idpf_free(hw, *cq_out);
+
+	return status;
+}
+
+/**
+ * idpf_ctlq_remove - deallocate and remove specified control queue
+ * @hw: pointer to hardware struct
+ * @cq: pointer to control queue to be removed
+ */
+void idpf_ctlq_remove(struct idpf_hw *hw,
+		      struct idpf_ctlq_info *cq)
+{
+	LIST_REMOVE(cq, cq_list);
+	idpf_ctlq_shutdown(hw, cq);
+	idpf_free(hw, cq);
+}
+
+/**
+ * idpf_ctlq_init - main initialization routine for all control queues
+ * @hw: pointer to hardware struct
+ * @num_q: number of queues to initialize
+ * @q_info: array of structs containing info for each queue to be initialized
+ *
+ * This initializes any number and any type of control queues. This is an all
+ * or nothing routine; if one fails, all previously allocated queues will be
+ * destroyed. This must be called prior to using the individual add/remove
+ * APIs.
+ */
+int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
+		   struct idpf_ctlq_create_info *q_info)
+{
+	struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
+	int ret_code = IDPF_SUCCESS;
+	int i = 0;
+
+	LIST_INIT(&hw->cq_list_head);
+
+	for (i = 0; i < num_q; i++) {
+		struct idpf_ctlq_create_info *qinfo = q_info + i;
+
+		ret_code = idpf_ctlq_add(hw, qinfo, &cq);
+		if (ret_code)
+			goto init_destroy_qs;
+	}
+
+	return ret_code;
+
+init_destroy_qs:
+	LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
+				 idpf_ctlq_info, cq_list)
+		idpf_ctlq_remove(hw, cq);
+
+	return ret_code;
+}
+
+/**
+ * idpf_ctlq_deinit - destroy all control queues
+ * @hw: pointer to hw struct
+ */
+int idpf_ctlq_deinit(struct idpf_hw *hw)
+{
+	struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
+	int ret_code = IDPF_SUCCESS;
+
+	LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
+				 idpf_ctlq_info, cq_list)
+		idpf_ctlq_remove(hw, cq);
+
+	return ret_code;
+}
+
+/**
+ * idpf_ctlq_send - send command to Control Queue (CTQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+		   u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+	struct idpf_ctlq_desc *desc;
+	int num_desc_avail = 0;
+	int status = IDPF_SUCCESS;
+	int i = 0;
+
+	if (!cq || !cq->ring_size)
+		return IDPF_ERR_CTLQ_EMPTY;
+
+	idpf_acquire_lock(&cq->cq_lock);
+
+	/* Ensure there are enough descriptors to send all messages */
+	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+	if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+		status = IDPF_ERR_CTLQ_FULL;
+		goto sq_send_command_out;
+	}
+
+	for (i = 0; i < num_q_msg; i++) {
+		struct idpf_ctlq_msg *msg = &q_msg[i];
+		u64 msg_cookie;
+
+		desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+
+		desc->opcode = CPU_TO_LE16(msg->opcode);
+		desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
+
+		msg_cookie = *(u64 *)&msg->cookie;
+		desc->cookie_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(msg_cookie));
+		desc->cookie_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(msg_cookie));
+
+		desc->flags = CPU_TO_LE16((msg->host_id & IDPF_HOST_ID_MASK) <<
+					  IDPF_CTLQ_FLAG_HOST_ID_S);
+		if (msg->data_len) {
+			struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+			desc->datalen |= CPU_TO_LE16(msg->data_len);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
+			desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
+
+			/* Update the address values in the desc with the pa
+			 * value for respective buffer
+			 */
+			desc->params.indirect.addr_high =
+				CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
+			desc->params.indirect.addr_low =
+				CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
+
+			idpf_memcpy(&desc->params, msg->ctx.indirect.context,
+				    IDPF_INDIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+#ifdef SIMICS_BUILD
+			/* MBX message with opcode idpf_mbq_opc_send_msg_to_pf
+			 * need to set peer PF function id in param0 for Simics
+			 */
+			if (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {
+				desc->params.indirect.param0 =
+					CPU_TO_LE32(msg->func_id);
+			}
+#endif
+		} else {
+			idpf_memcpy(&desc->params, msg->ctx.direct,
+				    IDPF_DIRECT_CTX_SIZE, IDPF_NONDMA_TO_DMA);
+#ifdef SIMICS_BUILD
+			/* MBX message with opcode idpf_mbq_opc_send_msg_to_pf
+			 * need to set peer PF function id in param0 for Simics
+			 */
+			if (msg->opcode == idpf_mbq_opc_send_msg_to_pf) {
+				desc->params.direct.param0 =
+					CPU_TO_LE32(msg->func_id);
+			}
+#endif
+		}
+
+		/* Store buffer info */
+		cq->bi.tx_msg[cq->next_to_use] = msg;
+
+		(cq->next_to_use)++;
+		if (cq->next_to_use == cq->ring_size)
+			cq->next_to_use = 0;
+	}
+
+	/* Force memory write to complete before letting hardware
+	 * know that there are new descriptors to fetch.
+	 */
+	idpf_wmb();
+
+	wr32(hw, cq->reg.tail, cq->next_to_use);
+
+sq_send_command_out:
+	idpf_release_lock(&cq->cq_lock);
+
+	return status;
+}
+
+/**
+ * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors.  The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+		       struct idpf_ctlq_msg *msg_status[])
+{
+	struct idpf_ctlq_desc *desc;
+	u16 i = 0, num_to_clean;
+	u16 ntc, desc_err;
+	int ret = IDPF_SUCCESS;
+
+	if (!cq || !cq->ring_size)
+		return IDPF_ERR_CTLQ_EMPTY;
+
+	if (*clean_count == 0)
+		return IDPF_SUCCESS;
+	if (*clean_count > cq->ring_size)
+		return IDPF_ERR_PARAM;
+
+	idpf_acquire_lock(&cq->cq_lock);
+
+	ntc = cq->next_to_clean;
+
+	num_to_clean = *clean_count;
+
+	for (i = 0; i < num_to_clean; i++) {
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		if (!(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		desc_err = LE16_TO_CPU(desc->ret_val);
+		if (desc_err) {
+			/* strip off FW internal code */
+			desc_err &= 0xff;
+		}
+
+		msg_status[i] = cq->bi.tx_msg[ntc];
+		msg_status[i]->status = desc_err;
+
+		cq->bi.tx_msg[ntc] = NULL;
+
+		/* Zero out any stale data */
+		idpf_memset(desc, 0, sizeof(*desc), IDPF_DMA_MEM);
+
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+
+	idpf_release_lock(&cq->cq_lock);
+
+	/* Return number of descriptors actually cleaned */
+	*clean_count = i;
+
+	return ret;
+}
+
+/**
+ * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. buff_count = 0,
+ * buffs = NULL to support direct commands
+ */
+int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+			    u16 *buff_count, struct idpf_dma_mem **buffs)
+{
+	struct idpf_ctlq_desc *desc;
+	u16 ntp = cq->next_to_post;
+	bool buffs_avail = false;
+	u16 tbp = ntp + 1;
+	int status = IDPF_SUCCESS;
+	int i = 0;
+
+	if (*buff_count > cq->ring_size)
+		return IDPF_ERR_PARAM;
+
+	if (*buff_count > 0)
+		buffs_avail = true;
+
+	idpf_acquire_lock(&cq->cq_lock);
+
+	if (tbp >= cq->ring_size)
+		tbp = 0;
+
+	if (tbp == cq->next_to_clean)
+		/* Nothing to do */
+		goto post_buffs_out;
+
+	/* Post buffers for as many as provided or up until the last one used */
+	while (ntp != cq->next_to_clean) {
+		desc = IDPF_CTLQ_DESC(cq, ntp);
+
+		if (cq->bi.rx_buff[ntp])
+			goto fill_desc;
+		if (!buffs_avail) {
+			/* If the caller hasn't given us any buffers or
+			 * there are none left, search the ring itself
+			 * for an available buffer to move to this
+			 * entry starting at the next entry in the ring
+			 */
+			tbp = ntp + 1;
+
+			/* Wrap ring if necessary */
+			if (tbp >= cq->ring_size)
+				tbp = 0;
+
+			while (tbp != cq->next_to_clean) {
+				if (cq->bi.rx_buff[tbp]) {
+					cq->bi.rx_buff[ntp] =
+						cq->bi.rx_buff[tbp];
+					cq->bi.rx_buff[tbp] = NULL;
+
+					/* Found a buffer, no need to
+					 * search anymore
+					 */
+					break;
+				}
+
+				/* Wrap ring if necessary */
+				tbp++;
+				if (tbp >= cq->ring_size)
+					tbp = 0;
+			}
+
+			if (tbp == cq->next_to_clean)
+				goto post_buffs_out;
+		} else {
+			/* Give back pointer to DMA buffer */
+			cq->bi.rx_buff[ntp] = buffs[i];
+			i++;
+
+			if (i >= *buff_count)
+				buffs_avail = false;
+		}
+
+fill_desc:
+		desc->flags =
+			CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+		/* Post buffers to descriptor */
+		desc->datalen = CPU_TO_LE16(cq->bi.rx_buff[ntp]->size);
+		desc->params.indirect.addr_high =
+			CPU_TO_LE32(IDPF_HI_DWORD(cq->bi.rx_buff[ntp]->pa));
+		desc->params.indirect.addr_low =
+			CPU_TO_LE32(IDPF_LO_DWORD(cq->bi.rx_buff[ntp]->pa));
+
+		ntp++;
+		if (ntp == cq->ring_size)
+			ntp = 0;
+	}
+
+post_buffs_out:
+	/* Only update tail if buffers were actually posted */
+	if (cq->next_to_post != ntp) {
+		if (ntp)
+			/* Update next_to_post to ntp - 1 since current ntp
+			 * will not have a buffer
+			 */
+			cq->next_to_post = ntp - 1;
+		else
+			/* Wrap to end of ring since current ntp is 0 */
+			cq->next_to_post = cq->ring_size - 1;
+
+		wr32(hw, cq->reg.tail, cq->next_to_post);
+	}
+
+	idpf_release_lock(&cq->cq_lock);
+
+	/* return the number of buffers that were not posted */
+	*buff_count = *buff_count - i;
+
+	return status;
+}
+
+/**
+ * idpf_ctlq_recv - receive control queue message call back
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+		   struct idpf_ctlq_msg *q_msg)
+{
+	u16 num_to_clean, ntc, ret_val, flags;
+	struct idpf_ctlq_desc *desc;
+	int ret_code = IDPF_SUCCESS;
+	u16 i = 0;
+
+	if (!cq || !cq->ring_size)
+		return IDPF_ERR_CTLQ_EMPTY;
+
+	if (*num_q_msg == 0)
+		return IDPF_SUCCESS;
+	else if (*num_q_msg > cq->ring_size)
+		return IDPF_ERR_PARAM;
+
+	/* take the lock before we start messing with the ring */
+	idpf_acquire_lock(&cq->cq_lock);
+
+	ntc = cq->next_to_clean;
+
+	num_to_clean = *num_q_msg;
+
+	for (i = 0; i < num_to_clean; i++) {
+		u64 msg_cookie;
+
+		/* Fetch next descriptor and check if marked as done */
+		desc = IDPF_CTLQ_DESC(cq, ntc);
+		flags = LE16_TO_CPU(desc->flags);
+
+		if (!(flags & IDPF_CTLQ_FLAG_DD))
+			break;
+
+		ret_val = LE16_TO_CPU(desc->ret_val);
+
+		q_msg[i].vmvf_type = (flags &
+				      (IDPF_CTLQ_FLAG_FTYPE_VM |
+				       IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+				      IDPF_CTLQ_FLAG_FTYPE_S;
+
+		if (flags & IDPF_CTLQ_FLAG_ERR)
+			ret_code = IDPF_ERR_CTLQ_ERROR;
+
+		msg_cookie = (u64)LE32_TO_CPU(desc->cookie_high) << 32;
+		msg_cookie |= (u64)LE32_TO_CPU(desc->cookie_low);
+		idpf_memcpy(&q_msg[i].cookie, &msg_cookie, sizeof(u64),
+			    IDPF_NONDMA_TO_NONDMA);
+
+		q_msg[i].opcode = LE16_TO_CPU(desc->opcode);
+		q_msg[i].data_len = LE16_TO_CPU(desc->datalen);
+		q_msg[i].status = ret_val;
+
+		if (desc->datalen) {
+			idpf_memcpy(q_msg[i].ctx.indirect.context,
+				    &desc->params.indirect,
+				    IDPF_INDIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+
+			/* Assign pointer to dma buffer to ctlq_msg array
+			 * to be given to upper layer
+			 */
+			q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+
+			/* Zero out pointer to DMA buffer info;
+			 * will be repopulated by post buffers API
+			 */
+			cq->bi.rx_buff[ntc] = NULL;
+		} else {
+			idpf_memcpy(q_msg[i].ctx.direct,
+				    desc->params.raw,
+				    IDPF_DIRECT_CTX_SIZE,
+				    IDPF_DMA_TO_NONDMA);
+		}
+
+		/* Zero out stale data in descriptor */
+		idpf_memset(desc, 0, sizeof(struct idpf_ctlq_desc),
+			    IDPF_DMA_MEM);
+
+		ntc++;
+		if (ntc == cq->ring_size)
+			ntc = 0;
+	}
+
+	cq->next_to_clean = ntc;
+
+	idpf_release_lock(&cq->cq_lock);
+
+	*num_q_msg = i;
+	if (*num_q_msg == 0)
+		ret_code = IDPF_ERR_CTLQ_NO_WORK;
+
+	return ret_code;
+}
diff --git a/drivers/common/idpf/idpf_controlq.h b/drivers/common/idpf/idpf_controlq.h
new file mode 100644
index 0000000000..e7b0d803b3
--- /dev/null
+++ b/drivers/common/idpf/idpf_controlq.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#ifndef _IDPF_CONTROLQ_H_
+#define _IDPF_CONTROLQ_H_
+
+#ifdef __KERNEL__
+#include <linux/slab.h>
+#endif
+
+#ifndef __KERNEL__
+#include "idpf_osdep.h"
+#include "idpf_alloc.h"
+#endif
+#include "idpf_controlq_api.h"
+
+/* Maximum buffer lengths for all control queue types */
+#define IDPF_CTLQ_MAX_RING_SIZE 1024
+#define IDPF_CTLQ_MAX_BUF_LEN	4096
+
+#define IDPF_CTLQ_DESC(R, i) \
+	(&(((struct idpf_ctlq_desc *)((R)->desc_ring.va))[i]))
+
+#define IDPF_CTLQ_DESC_UNUSED(R)					\
+	((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->ring_size) + \
+	       (R)->next_to_clean - (R)->next_to_use - 1))
+
+#ifndef __KERNEL__
+/* Data type manipulation macros. */
+#define IDPF_HI_DWORD(x)	((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define IDPF_LO_DWORD(x)	((u32)((x) & 0xFFFFFFFF))
+#define IDPF_HI_WORD(x)		((u16)(((x) >> 16) & 0xFFFF))
+#define IDPF_LO_WORD(x)		((u16)((x) & 0xFFFF))
+
+#endif
+/* Control Queue default settings */
+#define IDPF_CTRL_SQ_CMD_TIMEOUT	250  /* msecs */
+
+struct idpf_ctlq_desc {
+	__le16	flags;
+	__le16	opcode;
+	__le16	datalen;	/* 0 for direct commands */
+	union {
+		__le16 ret_val;
+		__le16 pfid_vfid;
+#define IDPF_CTLQ_DESC_VF_ID_S	0
+#ifdef SIMICS_BUILD
+#define IDPF_CTLQ_DESC_VF_ID_M	(0x3FF << IDPF_CTLQ_DESC_VF_ID_S)
+#define IDPF_CTLQ_DESC_PF_ID_S	10
+#define IDPF_CTLQ_DESC_PF_ID_M	(0x3F << IDPF_CTLQ_DESC_PF_ID_S)
+#else
+#define IDPF_CTLQ_DESC_VF_ID_M	(0x7FF << IDPF_CTLQ_DESC_VF_ID_S)
+#define IDPF_CTLQ_DESC_PF_ID_S	11
+#define IDPF_CTLQ_DESC_PF_ID_M	(0x1F << IDPF_CTLQ_DESC_PF_ID_S)
+#endif
+	};
+	__le32 cookie_high;
+	__le32 cookie_low;
+	union {
+		struct {
+			__le32 param0;
+			__le32 param1;
+			__le32 param2;
+			__le32 param3;
+		} direct;
+		struct {
+			__le32 param0;
+			__le32 param1;
+			__le32 addr_high;
+			__le32 addr_low;
+		} indirect;
+		u8 raw[16];
+	} params;
+};
+
+/* Flags sub-structure
+ * |0  |1  |2  |3  |4  |5  |6  |7  |8  |9  |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|  * RSV *  |FTYPE  | *RSV* |RD |VFC|BUF|  HOST_ID  |
+ */
+/* command flags and offsets */
+#define IDPF_CTLQ_FLAG_DD_S		0
+#define IDPF_CTLQ_FLAG_CMP_S		1
+#define IDPF_CTLQ_FLAG_ERR_S		2
+#define IDPF_CTLQ_FLAG_FTYPE_S		6
+#define IDPF_CTLQ_FLAG_RD_S		10
+#define IDPF_CTLQ_FLAG_VFC_S		11
+#define IDPF_CTLQ_FLAG_BUF_S		12
+#define IDPF_CTLQ_FLAG_HOST_ID_S	13
+
+#define IDPF_CTLQ_FLAG_DD	BIT(IDPF_CTLQ_FLAG_DD_S)	/* 0x1	  */
+#define IDPF_CTLQ_FLAG_CMP	BIT(IDPF_CTLQ_FLAG_CMP_S)	/* 0x2	  */
+#define IDPF_CTLQ_FLAG_ERR	BIT(IDPF_CTLQ_FLAG_ERR_S)	/* 0x4	  */
+#define IDPF_CTLQ_FLAG_FTYPE_VM	BIT(IDPF_CTLQ_FLAG_FTYPE_S)	/* 0x40	  */
+#define IDPF_CTLQ_FLAG_FTYPE_PF	BIT(IDPF_CTLQ_FLAG_FTYPE_S + 1)	/* 0x80   */
+#define IDPF_CTLQ_FLAG_RD	BIT(IDPF_CTLQ_FLAG_RD_S)	/* 0x400  */
+#define IDPF_CTLQ_FLAG_VFC	BIT(IDPF_CTLQ_FLAG_VFC_S)	/* 0x800  */
+#define IDPF_CTLQ_FLAG_BUF	BIT(IDPF_CTLQ_FLAG_BUF_S)	/* 0x1000 */
+
+/* Host ID is a special 3-bit field, not a 1-bit flag */
+#define IDPF_CTLQ_FLAG_HOST_ID_M MAKE_MASK(0x7000UL, IDPF_CTLQ_FLAG_HOST_ID_S)
+
+struct idpf_mbxq_desc {
+	u8 pad[8];		/* CTLQ flags/opcode/len/retval fields */
+	u32 chnl_opcode;	/* avoid confusion with desc->opcode */
+	u32 chnl_retval;	/* ditto for desc->retval */
+	u32 pf_vf_id;		/* used by CP when sending to PF */
+};
+
+enum idpf_mac_type {
+	IDPF_MAC_UNKNOWN = 0,
+	IDPF_MAC_PF,
+	IDPF_MAC_VF,
+	IDPF_MAC_GENERIC
+};
+
+#define ETH_ALEN 6
+
+struct idpf_mac_info {
+	enum idpf_mac_type type;
+	u8 addr[ETH_ALEN];
+	u8 perm_addr[ETH_ALEN];
+};
+
+#define IDPF_AQ_LINK_UP 0x1
+
+/* PCI bus types */
+enum idpf_bus_type {
+	idpf_bus_type_unknown = 0,
+	idpf_bus_type_pci,
+	idpf_bus_type_pcix,
+	idpf_bus_type_pci_express,
+	idpf_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum idpf_bus_speed {
+	idpf_bus_speed_unknown	= 0,
+	idpf_bus_speed_33	= 33,
+	idpf_bus_speed_66	= 66,
+	idpf_bus_speed_100	= 100,
+	idpf_bus_speed_120	= 120,
+	idpf_bus_speed_133	= 133,
+	idpf_bus_speed_2500	= 2500,
+	idpf_bus_speed_5000	= 5000,
+	idpf_bus_speed_8000	= 8000,
+	idpf_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum idpf_bus_width {
+	idpf_bus_width_unknown	= 0,
+	idpf_bus_width_pcie_x1	= 1,
+	idpf_bus_width_pcie_x2	= 2,
+	idpf_bus_width_pcie_x4	= 4,
+	idpf_bus_width_pcie_x8	= 8,
+	idpf_bus_width_32	= 32,
+	idpf_bus_width_64	= 64,
+	idpf_bus_width_reserved
+};
+
+/* Bus parameters */
+struct idpf_bus_info {
+	enum idpf_bus_speed speed;
+	enum idpf_bus_width width;
+	enum idpf_bus_type type;
+
+	u16 func;
+	u16 device;
+	u16 lan_id;
+	u16 bus_id;
+};
+
+/* Function specific capabilities */
+struct idpf_hw_func_caps {
+	u32 num_alloc_vfs;
+	u32 vf_base_id;
+};
+
+/* Define the APF hardware struct to replace other control structs as needed
+ * Align to ctlq_hw_info
+ */
+struct idpf_hw {
+	/* Some part of the BAR0 address space is not mapped by the LAN
+	 * driver. As a result the LAN driver maps two regions of BAR0,
+	 * each with its own base hardware address once mapped.
+	 */
+	u8 *hw_addr;
+	u8 *hw_addr_region2;
+	u64 hw_addr_len;
+	u64 hw_addr_region2_len;
+
+	void *back;
+
+	/* control queue - send and receive */
+	struct idpf_ctlq_info *asq;
+	struct idpf_ctlq_info *arq;
+
+	/* subsystem structs */
+	struct idpf_mac_info mac;
+	struct idpf_bus_info bus;
+	struct idpf_hw_func_caps func_caps;
+
+	/* pci info */
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	u8 revision_id;
+	bool adapter_stopped;
+
+	LIST_HEAD_TYPE(list_head, idpf_ctlq_info) cq_list_head;
+};
+
+int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw,
+			     struct idpf_ctlq_info *cq);
+
+void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+
+/* prototype for functions used for dynamic memory allocation */
+void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem,
+			 u64 size);
+void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem);
+#endif /* _IDPF_CONTROLQ_H_ */
diff --git a/drivers/common/idpf/idpf_controlq_api.h b/drivers/common/idpf/idpf_controlq_api.h
new file mode 100644
index 0000000000..3bb8f72aad
--- /dev/null
+++ b/drivers/common/idpf/idpf_controlq_api.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#ifndef _IDPF_CONTROLQ_API_H_
+#define _IDPF_CONTROLQ_API_H_
+
+#ifdef __KERNEL__
+#include "idpf_mem.h"
+#else /* !__KERNEL__ */
+#include "idpf_osdep.h"
+/* Error Codes */
+/* Linux kernel driver can't directly use these. Instead, they are mapped to
+ * linux compatible error codes which get translated in the build script.
+ */
+#define IDPF_SUCCESS			0
+#define IDPF_ERR_PARAM			-53	/* -EBADR */
+#define IDPF_ERR_NOT_IMPL		-95	/* -EOPNOTSUPP */
+#define IDPF_ERR_NOT_READY		-16	/* -EBUSY */
+#define IDPF_ERR_BAD_PTR		-14	/* -EFAULT */
+#define IDPF_ERR_INVAL_SIZE		-90	/* -EMSGSIZE */
+#define IDPF_ERR_DEVICE_NOT_SUPPORTED	-19	/* -ENODEV */
+#define IDPF_ERR_FW_API_VER		-13	/* -EACCESS */
+#define IDPF_ERR_NO_MEMORY		-12	/* -ENOMEM */
+#define IDPF_ERR_CFG			-22	/* -EINVAL */
+#define IDPF_ERR_OUT_OF_RANGE		-34	/* -ERANGE */
+#define IDPF_ERR_ALREADY_EXISTS		-17	/* -EEXIST */
+#define IDPF_ERR_DOES_NOT_EXIST		-6	/* -ENXIO */
+#define IDPF_ERR_IN_USE			-114	/* -EALREADY */
+#define IDPF_ERR_MAX_LIMIT		-109	/* -ETOOMANYREFS */
+#define IDPF_ERR_RESET_ONGOING		-104	/* -ECONNRESET */
+
+/* CRQ/CSQ specific error codes */
+#define IDPF_ERR_CTLQ_ERROR		-74	/* -EBADMSG */
+#define IDPF_ERR_CTLQ_TIMEOUT		-110	/* -ETIMEDOUT */
+#define IDPF_ERR_CTLQ_FULL		-28	/* -ENOSPC */
+#define IDPF_ERR_CTLQ_NO_WORK		-42	/* -ENOMSG */
+#define IDPF_ERR_CTLQ_EMPTY		-105	/* -ENOBUFS */
+#endif /* !__KERNEL__ */
+
+struct idpf_hw;
+
+/* Used for queue init, response and events */
+enum idpf_ctlq_type {
+	IDPF_CTLQ_TYPE_MAILBOX_TX	= 0,
+	IDPF_CTLQ_TYPE_MAILBOX_RX	= 1,
+	IDPF_CTLQ_TYPE_CONFIG_TX	= 2,
+	IDPF_CTLQ_TYPE_CONFIG_RX	= 3,
+	IDPF_CTLQ_TYPE_EVENT_RX		= 4,
+	IDPF_CTLQ_TYPE_RDMA_TX		= 5,
+	IDPF_CTLQ_TYPE_RDMA_RX		= 6,
+	IDPF_CTLQ_TYPE_RDMA_COMPL	= 7
+};
+
+/*
+ * Generic Control Queue Structures
+ */
+
+struct idpf_ctlq_reg {
+	/* used for queue tracking */
+	u32 head;
+	u32 tail;
+	/* Below applies only to default mb (if present) */
+	u32 len;
+	u32 bah;
+	u32 bal;
+	u32 len_mask;
+	u32 len_ena_mask;
+	u32 head_mask;
+};
+
+/* Generic queue msg structure */
+struct idpf_ctlq_msg {
+	u8 vmvf_type; /* represents the source of the message on recv */
+#define IDPF_VMVF_TYPE_VF 0
+#define IDPF_VMVF_TYPE_VM 1
+#define IDPF_VMVF_TYPE_PF 2
+	u8 host_id;
+	/* 3b field used only when sending a message to peer - to be used in
+	 * combination with target func_id to route the message
+	 */
+#define IDPF_HOST_ID_MASK 0x7
+
+	u16 opcode;
+	u16 data_len;	/* data_len = 0 when no payload is attached */
+	union {
+		u16 func_id;	/* when sending a message */
+		u16 status;	/* when receiving a message */
+	};
+	union {
+		struct {
+			u32 chnl_retval;
+			u32 chnl_opcode;
+		} mbx;
+	} cookie;
+	union {
+#define IDPF_DIRECT_CTX_SIZE	16
+#define IDPF_INDIRECT_CTX_SIZE	8
+		/* 16 bytes of context can be provided or 8 bytes of context
+		 * plus the address of a DMA buffer
+		 */
+		u8 direct[IDPF_DIRECT_CTX_SIZE];
+		struct {
+			u8 context[IDPF_INDIRECT_CTX_SIZE];
+			struct idpf_dma_mem *payload;
+		} indirect;
+	} ctx;
+};
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct idpf_ctlq_create_info {
+	enum idpf_ctlq_type type;
+	int id; /* absolute queue offset passed as input
+		 * -1 for default mailbox if present
+		 */
+	u16 len; /* Queue length passed as input */
+	u16 buf_size; /* buffer size passed as input */
+	u64 base_address; /* output, HPA of the Queue start  */
+	struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+
+	int ext_info_size;
+	void *ext_info; /* Specific to q type */
+};
+
+/* Control Queue information */
+struct idpf_ctlq_info {
+	LIST_ENTRY_TYPE(idpf_ctlq_info) cq_list;
+
+	enum idpf_ctlq_type cq_type;
+	int q_id;
+	idpf_lock cq_lock;		/* queue lock
+					 * idpf_lock is defined in OSdep.h
+					 */
+	/* used for interrupt processing */
+	u16 next_to_use;
+	u16 next_to_clean;
+	u16 next_to_post;		/* starting descriptor to post buffers
+					 * to after recv
+					 */
+
+	struct idpf_dma_mem desc_ring;	/* descriptor ring memory
+					 * idpf_dma_mem is defined in OSdep.h
+					 */
+	union {
+		struct idpf_dma_mem **rx_buff;
+		struct idpf_ctlq_msg **tx_msg;
+	} bi;
+
+	u16 buf_size;			/* queue buffer size */
+	u16 ring_size;			/* Number of descriptors */
+	struct idpf_ctlq_reg reg;	/* registers accessed by ctlqs */
+};
+
+/* PF/VF mailbox commands */
+enum idpf_mbx_opc {
+	/* idpf_mbq_opc_send_msg_to_pf:
+	 *	usage: used by PF or VF to send a message to its CPF
+	 *	target: RX queue and function ID of parent PF taken from HW
+	 */
+	idpf_mbq_opc_send_msg_to_pf		= 0x0801,
+
+	/* idpf_mbq_opc_send_msg_to_vf:
+	 *	usage: used by PF to send message to a VF
+	 *	target: VF control queue ID must be specified in descriptor
+	 */
+	idpf_mbq_opc_send_msg_to_vf		= 0x0802,
+
+	/* idpf_mbq_opc_send_msg_to_peer_pf:
+	 *	usage: used by any function to send message to any peer PF
+	 *	target: RX queue and host of parent PF taken from HW
+	 */
+	idpf_mbq_opc_send_msg_to_peer_pf	= 0x0803,
+
+	/* idpf_mbq_opc_send_msg_to_peer_drv:
+	 *	usage: used by any function to send message to any peer driver
+	 *	target: RX queue and target host must be specified in descriptor
+	 */
+	idpf_mbq_opc_send_msg_to_peer_drv	= 0x0804,
+};
+
+/*
+ * API supported for control queue management
+ */
+
+/* Will init all required q including default mb.  "q_info" is an array of
+ * create_info structs equal to the number of control queues to be created.
+ */
+__rte_internal
+int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
+		   struct idpf_ctlq_create_info *q_info);
+
+/* Allocate and initialize a single control queue, which will be added to the
+ * control queue list; returns a handle to the created control queue
+ */
+int idpf_ctlq_add(struct idpf_hw *hw,
+		  struct idpf_ctlq_create_info *qinfo,
+		  struct idpf_ctlq_info **cq);
+
+/* Deinitialize and deallocate a single control queue */
+void idpf_ctlq_remove(struct idpf_hw *hw,
+		      struct idpf_ctlq_info *cq);
+
+/* Sends messages to HW and will also free the buffer */
+__rte_internal
+int idpf_ctlq_send(struct idpf_hw *hw,
+		   struct idpf_ctlq_info *cq,
+		   u16 num_q_msg,
+		   struct idpf_ctlq_msg q_msg[]);
+
+/* Receives messages and called by interrupt handler/polling
+ * initiated by app/process. Also caller is supposed to free the buffers
+ */
+__rte_internal
+int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+		   struct idpf_ctlq_msg *q_msg);
+
+/* Reclaims send descriptors on HW write back */
+__rte_internal
+int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+		       struct idpf_ctlq_msg *msg_status[]);
+
+/* Indicate RX buffers are done being processed */
+__rte_internal
+int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw,
+			    struct idpf_ctlq_info *cq,
+			    u16 *buff_count,
+			    struct idpf_dma_mem **buffs);
+
+/* Will destroy all q including the default mb */
+__rte_internal
+int idpf_ctlq_deinit(struct idpf_hw *hw);
+
+#endif /* _IDPF_CONTROLQ_API_H_ */
diff --git a/drivers/common/idpf/idpf_controlq_setup.c b/drivers/common/idpf/idpf_controlq_setup.c
new file mode 100644
index 0000000000..00dfedb6a4
--- /dev/null
+++ b/drivers/common/idpf/idpf_controlq_setup.c
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+
+#include "idpf_controlq.h"
+
+
+/**
+ * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ */
+static int
+idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
+			  struct idpf_ctlq_info *cq)
+{
+	size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);
+
+	cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);
+	if (!cq->desc_ring.va)
+		return IDPF_ERR_NO_MEMORY;
+
+	return IDPF_SUCCESS;
+}
+
+/**
+ * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Allocate the buffer head for all control queues, and if it's a receive
+ * queue, allocate DMA buffers
+ */
+static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
+				struct idpf_ctlq_info *cq)
+{
+	int i = 0;
+
+	/* Do not allocate DMA buffers for transmit queues */
+	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
+		return IDPF_SUCCESS;
+
+	/* We'll be allocating the buffer info memory first, then we can
+	 * allocate the mapped buffers for the event processing
+	 */
+	cq->bi.rx_buff = (struct idpf_dma_mem **)
+		idpf_calloc(hw, cq->ring_size,
+			    sizeof(struct idpf_dma_mem *));
+	if (!cq->bi.rx_buff)
+		return IDPF_ERR_NO_MEMORY;
+
+	/* allocate the mapped buffers (except for the last one) */
+	for (i = 0; i < cq->ring_size - 1; i++) {
+		struct idpf_dma_mem *bi;
+		int num = 1; /* number of idpf_dma_mem to be allocated */
+
+		cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc(hw, num,
+						sizeof(struct idpf_dma_mem));
+		if (!cq->bi.rx_buff[i])
+			goto unwind_alloc_cq_bufs;
+
+		bi = cq->bi.rx_buff[i];
+
+		bi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size);
+		if (!bi->va) {
+			/* unwind will not free the failed entry */
+			idpf_free(hw, cq->bi.rx_buff[i]);
+			goto unwind_alloc_cq_bufs;
+		}
+	}
+
+	return IDPF_SUCCESS;
+
+unwind_alloc_cq_bufs:
+	/* don't try to free the one that failed... */
+	i--;
+	for (; i >= 0; i--) {
+		idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
+		idpf_free(hw, cq->bi.rx_buff[i]);
+	}
+	idpf_free(hw, cq->bi.rx_buff);
+
+	return IDPF_ERR_NO_MEMORY;
+}
+
+/**
+ * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ */
+static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,
+				     struct idpf_ctlq_info *cq)
+{
+	idpf_free_dma_mem(hw, &cq->desc_ring);
+}
+
+/**
+ * idpf_ctlq_free_bufs - Free CQ buffer info elements
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Free the DMA buffers for RX queues, and DMA buffer header for both RX and TX
+ * queues.  The upper layers are expected to manage freeing of TX DMA buffers
+ */
+static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	void *bi;
+
+	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) {
+		int i;
+
+		/* free DMA buffers for rx queues */
+		for (i = 0; i < cq->ring_size; i++) {
+			if (cq->bi.rx_buff[i]) {
+				idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
+				idpf_free(hw, cq->bi.rx_buff[i]);
+			}
+		}
+
+		bi = (void *)cq->bi.rx_buff;
+	} else {
+		bi = (void *)cq->bi.tx_msg;
+	}
+
+	/* free the buffer header */
+	idpf_free(hw, bi);
+}
+
+/**
+ * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Free the memory used by the ring, buffers and other related structures
+ */
+void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	/* free ring buffers and the ring itself */
+	idpf_ctlq_free_bufs(hw, cq);
+	idpf_ctlq_free_desc_ring(hw, cq);
+}
+
+/**
+ * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ */
+int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+	int ret_code;
+
+	/* verify input for valid configuration */
+	if (!cq->ring_size || !cq->buf_size)
+		return IDPF_ERR_CFG;
+
+	/* allocate the ring memory */
+	ret_code = idpf_ctlq_alloc_desc_ring(hw, cq);
+	if (ret_code)
+		return ret_code;
+
+	/* allocate buffers in the rings */
+	ret_code = idpf_ctlq_alloc_bufs(hw, cq);
+	if (ret_code)
+		goto idpf_init_cq_free_ring;
+
+	/* success! */
+	return IDPF_SUCCESS;
+
+idpf_init_cq_free_ring:
+	idpf_free_dma_mem(hw, &cq->desc_ring);
+	return ret_code;
+}
diff --git a/drivers/common/idpf/idpf_devids.h b/drivers/common/idpf/idpf_devids.h
new file mode 100644
index 0000000000..a91eb4e02a
--- /dev/null
+++ b/drivers/common/idpf/idpf_devids.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#ifndef _IDPF_DEVIDS_H_
+#define _IDPF_DEVIDS_H_
+
+/* Vendor ID */
+#define IDPF_INTEL_VENDOR_ID		0x8086
+
+/* Device IDs */
+#define IDPF_DEV_ID_PF			0x1452
+#define IDPF_DEV_ID_VF			0x1889
+
+
+
+
+#endif /* _IDPF_DEVIDS_H_ */
diff --git a/drivers/common/idpf/idpf_lan_pf_regs.h b/drivers/common/idpf/idpf_lan_pf_regs.h
new file mode 100644
index 0000000000..3df2347bd7
--- /dev/null
+++ b/drivers/common/idpf/idpf_lan_pf_regs.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#ifndef _IDPF_LAN_PF_REGS_H_
+#define _IDPF_LAN_PF_REGS_H_
+
+
+/* Receive queues */
+#define PF_QRX_BASE			0x00000000
+#define PF_QRX_TAIL(_QRX)		(PF_QRX_BASE + (((_QRX) * 0x1000)))
+#define PF_QRX_BUFFQ_BASE		0x03000000
+#define PF_QRX_BUFFQ_TAIL(_QRX)		(PF_QRX_BUFFQ_BASE + (((_QRX) * 0x1000)))
+
+/* Transmit queues */
+#define PF_QTX_BASE			0x05000000
+#define PF_QTX_COMM_DBELL(_DBQM)	(PF_QTX_BASE + ((_DBQM) * 0x1000))
+
+
+/* Control(PF Mailbox) Queue */
+#define PF_FW_BASE			0x08400000
+
+#define PF_FW_ARQBAL			(PF_FW_BASE)
+#define PF_FW_ARQBAH			(PF_FW_BASE + 0x4)
+#define PF_FW_ARQLEN			(PF_FW_BASE + 0x8)
+#define PF_FW_ARQLEN_ARQLEN_S		0
+#define PF_FW_ARQLEN_ARQLEN_M		MAKEMASK(0x1FFF, PF_FW_ARQLEN_ARQLEN_S)
+#define PF_FW_ARQLEN_ARQVFE_S		28
+#define PF_FW_ARQLEN_ARQVFE_M		BIT(PF_FW_ARQLEN_ARQVFE_S)
+#define PF_FW_ARQLEN_ARQOVFL_S		29
+#define PF_FW_ARQLEN_ARQOVFL_M		BIT(PF_FW_ARQLEN_ARQOVFL_S)
+#define PF_FW_ARQLEN_ARQCRIT_S		30
+#define PF_FW_ARQLEN_ARQCRIT_M		BIT(PF_FW_ARQLEN_ARQCRIT_S)
+#define PF_FW_ARQLEN_ARQENABLE_S	31
+#define PF_FW_ARQLEN_ARQENABLE_M	BIT(PF_FW_ARQLEN_ARQENABLE_S)
+#define PF_FW_ARQH			(PF_FW_BASE + 0xC)
+#define PF_FW_ARQH_ARQH_S		0
+#define PF_FW_ARQH_ARQH_M		MAKEMASK(0x1FFF, PF_FW_ARQH_ARQH_S)
+#define PF_FW_ARQT			(PF_FW_BASE + 0x10)
+
+#define PF_FW_ATQBAL			(PF_FW_BASE + 0x14)
+#define PF_FW_ATQBAH			(PF_FW_BASE + 0x18)
+#define PF_FW_ATQLEN			(PF_FW_BASE + 0x1C)
+#define PF_FW_ATQLEN_ATQLEN_S		0
+#define PF_FW_ATQLEN_ATQLEN_M		MAKEMASK(0x3FF, PF_FW_ATQLEN_ATQLEN_S)
+#define PF_FW_ATQLEN_ATQVFE_S		28
+#define PF_FW_ATQLEN_ATQVFE_M		BIT(PF_FW_ATQLEN_ATQVFE_S)
+#define PF_FW_ATQLEN_ATQOVFL_S		29
+#define PF_FW_ATQLEN_ATQOVFL_M		BIT(PF_FW_ATQLEN_ATQOVFL_S)
+#define PF_FW_ATQLEN_ATQCRIT_S		30
+#define PF_FW_ATQLEN_ATQCRIT_M		BIT(PF_FW_ATQLEN_ATQCRIT_S)
+#define PF_FW_ATQLEN_ATQENABLE_S	31
+#define PF_FW_ATQLEN_ATQENABLE_M	BIT(PF_FW_ATQLEN_ATQENABLE_S)
+#define PF_FW_ATQH			(PF_FW_BASE + 0x20)
+#define PF_FW_ATQH_ATQH_S		0
+#define PF_FW_ATQH_ATQH_M		MAKEMASK(0x3FF, PF_FW_ATQH_ATQH_S)
+#define PF_FW_ATQT			(PF_FW_BASE + 0x24)
+
+/* Interrupts */
+#define PF_GLINT_BASE			0x08900000
+#define PF_GLINT_DYN_CTL(_INT)		(PF_GLINT_BASE + ((_INT) * 0x1000))
+#define PF_GLINT_DYN_CTL_INTENA_S	0
+#define PF_GLINT_DYN_CTL_INTENA_M	BIT(PF_GLINT_DYN_CTL_INTENA_S)
+#define PF_GLINT_DYN_CTL_CLEARPBA_S	1
+#define PF_GLINT_DYN_CTL_CLEARPBA_M	BIT(PF_GLINT_DYN_CTL_CLEARPBA_S)
+#define PF_GLINT_DYN_CTL_SWINT_TRIG_S	2
+#define PF_GLINT_DYN_CTL_SWINT_TRIG_M	BIT(PF_GLINT_DYN_CTL_SWINT_TRIG_S)
+#define PF_GLINT_DYN_CTL_ITR_INDX_S	3
+#define PF_GLINT_DYN_CTL_ITR_INDX_M	MAKEMASK(0x3, PF_GLINT_DYN_CTL_ITR_INDX_S)
+#define PF_GLINT_DYN_CTL_INTERVAL_S	5
+#define PF_GLINT_DYN_CTL_INTERVAL_M	BIT(PF_GLINT_DYN_CTL_INTERVAL_S)
+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S	24
+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S)
+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_S	25
+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_M	BIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_S)
+#define PF_GLINT_DYN_CTL_WB_ON_ITR_S	30
+#define PF_GLINT_DYN_CTL_WB_ON_ITR_M	BIT(PF_GLINT_DYN_CTL_WB_ON_ITR_S)
+#define PF_GLINT_DYN_CTL_INTENA_MSK_S	31
+#define PF_GLINT_DYN_CTL_INTENA_MSK_M	BIT(PF_GLINT_DYN_CTL_INTENA_MSK_S)
+#define PF_GLINT_ITR_V2(_i, _reg_start) (((_i) * 4) + (_reg_start))
+#define PF_GLINT_ITR(_i, _INT) (PF_GLINT_BASE + (((_i) + 1) * 4) + ((_INT) * 0x1000))
+#define PF_GLINT_ITR_MAX_INDEX		2
+#define PF_GLINT_ITR_INTERVAL_S		0
+#define PF_GLINT_ITR_INTERVAL_M		MAKEMASK(0xFFF, PF_GLINT_ITR_INTERVAL_S)
+
+/* Timesync registers */
+#define PF_TIMESYNC_BASE		0x08404000
+#define PF_GLTSYN_CMD_SYNC		(PF_TIMESYNC_BASE)
+#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_S	0
+#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M	MAKEMASK(0x3, PF_GLTSYN_CMD_SYNC_EXEC_CMD_S)
+#define PF_GLTSYN_CMD_SYNC_SHTIME_EN_S	2
+#define PF_GLTSYN_CMD_SYNC_SHTIME_EN_M	BIT(PF_GLTSYN_CMD_SYNC_SHTIME_EN_S)
+#define PF_GLTSYN_SHTIME_0		(PF_TIMESYNC_BASE + 0x4)
+#define PF_GLTSYN_SHTIME_L		(PF_TIMESYNC_BASE + 0x8)
+#define PF_GLTSYN_SHTIME_H		(PF_TIMESYNC_BASE + 0xC)
+#define PF_GLTSYN_ART_L			(PF_TIMESYNC_BASE + 0x10)
+#define PF_GLTSYN_ART_H			(PF_TIMESYNC_BASE + 0x14)
+
+/* Generic registers */
+#define PF_INT_DIR_OICR_ENA		0x08406000
+#define PF_INT_DIR_OICR_ENA_S		0
+#define PF_INT_DIR_OICR_ENA_M	MAKEMASK(0xFFFFFFFF, PF_INT_DIR_OICR_ENA_S)
+#define PF_INT_DIR_OICR			0x08406004
+#define PF_INT_DIR_OICR_TSYN_EVNT	0
+#define PF_INT_DIR_OICR_PHY_TS_0	BIT(1)
+#define PF_INT_DIR_OICR_PHY_TS_1	BIT(2)
+#define PF_INT_DIR_OICR_CAUSE		0x08406008
+#define PF_INT_DIR_OICR_CAUSE_CAUSE_S	0
+#define PF_INT_DIR_OICR_CAUSE_CAUSE_M	MAKEMASK(0xFFFFFFFF, PF_INT_DIR_OICR_CAUSE_CAUSE_S)
+#define PF_INT_PBA_CLEAR		0x0840600C
+
+#define PF_FUNC_RID			0x08406010
+#define PF_FUNC_RID_FUNCTION_NUMBER_S	0
+#define PF_FUNC_RID_FUNCTION_NUMBER_M	MAKEMASK(0x7, PF_FUNC_RID_FUNCTION_NUMBER_S)
+#define PF_FUNC_RID_DEVICE_NUMBER_S	3
+#define PF_FUNC_RID_DEVICE_NUMBER_M	MAKEMASK(0x1F, PF_FUNC_RID_DEVICE_NUMBER_S)
+#define PF_FUNC_RID_BUS_NUMBER_S	8
+#define PF_FUNC_RID_BUS_NUMBER_M	MAKEMASK(0xFF, PF_FUNC_RID_BUS_NUMBER_S)
+
+/* Reset registers */
+#define PFGEN_RTRIG			0x08407000
+#define PFGEN_RTRIG_CORER_S		0
+#define PFGEN_RTRIG_CORER_M		BIT(0)
+#define PFGEN_RTRIG_LINKR_S		1
+#define PFGEN_RTRIG_LINKR_M		BIT(1)
+#define PFGEN_RTRIG_IMCR_S		2
+#define PFGEN_RTRIG_IMCR_M		BIT(2)
+#define PFGEN_RSTAT			0x08407008 /* PFR Status */
+#define PFGEN_RSTAT_PFR_STATE_S		0
+#define PFGEN_RSTAT_PFR_STATE_M		MAKEMASK(0x3, PFGEN_RSTAT_PFR_STATE_S)
+#define PFGEN_CTRL			0x0840700C
+#define PFGEN_CTRL_PFSWR		BIT(0)
+
+#endif
diff --git a/drivers/common/idpf/idpf_lan_txrx.h b/drivers/common/idpf/idpf_lan_txrx.h
new file mode 100644
index 0000000000..98484b267c
--- /dev/null
+++ b/drivers/common/idpf/idpf_lan_txrx.h
@@ -0,0 +1,428 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#ifndef _IDPF_LAN_TXRX_H_
+#define _IDPF_LAN_TXRX_H_
+#ifndef __KERNEL__
+#include "idpf_osdep.h"
+#endif
+
+enum idpf_rss_hash {
+	/* Values 0 - 28 are reserved for future use */
+	IDPF_HASH_INVALID		= 0,
+	IDPF_HASH_NONF_UNICAST_IPV4_UDP	= 29,
+	IDPF_HASH_NONF_MULTICAST_IPV4_UDP,
+	IDPF_HASH_NONF_IPV4_UDP,
+	IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK,
+	IDPF_HASH_NONF_IPV4_TCP,
+	IDPF_HASH_NONF_IPV4_SCTP,
+	IDPF_HASH_NONF_IPV4_OTHER,
+	IDPF_HASH_FRAG_IPV4,
+	/* Values 37-38 are reserved */
+	IDPF_HASH_NONF_UNICAST_IPV6_UDP	= 39,
+	IDPF_HASH_NONF_MULTICAST_IPV6_UDP,
+	IDPF_HASH_NONF_IPV6_UDP,
+	IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK,
+	IDPF_HASH_NONF_IPV6_TCP,
+	IDPF_HASH_NONF_IPV6_SCTP,
+	IDPF_HASH_NONF_IPV6_OTHER,
+	IDPF_HASH_FRAG_IPV6,
+	IDPF_HASH_NONF_RSVD47,
+	IDPF_HASH_NONF_FCOE_OX,
+	IDPF_HASH_NONF_FCOE_RX,
+	IDPF_HASH_NONF_FCOE_OTHER,
+	/* Values 51-62 are reserved */
+	IDPF_HASH_L2_PAYLOAD		= 63,
+	IDPF_HASH_MAX
+};
+
+/* Supported RSS offloads */
+#define IDPF_DEFAULT_RSS_HASH ( \
+	BIT_ULL(IDPF_HASH_NONF_IPV4_UDP) | \
+	BIT_ULL(IDPF_HASH_NONF_IPV4_SCTP) | \
+	BIT_ULL(IDPF_HASH_NONF_IPV4_TCP) | \
+	BIT_ULL(IDPF_HASH_NONF_IPV4_OTHER) | \
+	BIT_ULL(IDPF_HASH_FRAG_IPV4) | \
+	BIT_ULL(IDPF_HASH_NONF_IPV6_UDP) | \
+	BIT_ULL(IDPF_HASH_NONF_IPV6_TCP) | \
+	BIT_ULL(IDPF_HASH_NONF_IPV6_SCTP) | \
+	BIT_ULL(IDPF_HASH_NONF_IPV6_OTHER) | \
+	BIT_ULL(IDPF_HASH_FRAG_IPV6) | \
+	BIT_ULL(IDPF_HASH_L2_PAYLOAD))
+
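+/* Usage sketch (illustrative only): each enum idpf_rss_hash value is a bit
+ * position, so testing whether a packet type is part of a hash-enable bitmap
+ * is a plain bit test, e.g.:
+ *
+ *	if (IDPF_DEFAULT_RSS_HASH & BIT_ULL(IDPF_HASH_NONF_IPV4_TCP))
+ *		enable_ipv4_tcp_rss();	(hypothetical helper)
+ */
+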
+	/* TODO: Wrap below comment under internal flag
+	 * The 6 packet types below are not supported by FVL or older products.
+	 * They are supported by FPK and future products.
+	 */
+#define IDPF_DEFAULT_RSS_HASH_EXPANDED (IDPF_DEFAULT_RSS_HASH | \
+	BIT_ULL(IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK) | \
+	BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV4_UDP) | \
+	BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV4_UDP) | \
+	BIT_ULL(IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK) | \
+	BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV6_UDP) | \
+	BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV6_UDP))
+
+/* For idpf_splitq_base_tx_compl_desc */
+#define IDPF_TXD_COMPLQ_GEN_S	15
+#define IDPF_TXD_COMPLQ_GEN_M		BIT_ULL(IDPF_TXD_COMPLQ_GEN_S)
+#define IDPF_TXD_COMPLQ_COMPL_TYPE_S	11
+#define IDPF_TXD_COMPLQ_COMPL_TYPE_M	\
+	MAKEMASK(0x7UL, IDPF_TXD_COMPLQ_COMPL_TYPE_S)
+#define IDPF_TXD_COMPLQ_QID_S	0
+#define IDPF_TXD_COMPLQ_QID_M		MAKEMASK(0x3FFUL, IDPF_TXD_COMPLQ_QID_S)
+
+/* For base mode TX descriptors */
+
+#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S	23
+#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_M	BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S)
+#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_S	19
+#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_M	\
+	(0xFULL << IDPF_TXD_CTX_QW0_TUNN_DECTTL_S)
+#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_S	12
+#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_M	\
+	(0X7FULL << IDPF_TXD_CTX_QW0_TUNN_NATLEN_S)
+#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S	11
+#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M    \
+	BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S)
+#define IDPF_TXD_CTX_EIP_NOINC_IPID_CONST	\
+	IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M
+#define IDPF_TXD_CTX_QW0_TUNN_NATT_S	        9
+#define IDPF_TXD_CTX_QW0_TUNN_NATT_M	(0x3ULL << IDPF_TXD_CTX_QW0_TUNN_NATT_S)
+#define IDPF_TXD_CTX_UDP_TUNNELING	BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_NATT_S)
+#define IDPF_TXD_CTX_GRE_TUNNELING	(0x2ULL << IDPF_TXD_CTX_QW0_TUNN_NATT_S)
+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S	2
+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_M	\
+	(0x3FULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S)
+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S	0
+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_M	\
+	(0x3ULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S)
+
+#define IDPF_TXD_CTX_QW1_MSS_S		50
+#define IDPF_TXD_CTX_QW1_MSS_M		\
+	MAKEMASK(0x3FFFULL, IDPF_TXD_CTX_QW1_MSS_S)
+#define IDPF_TXD_CTX_QW1_TSO_LEN_S	30
+#define IDPF_TXD_CTX_QW1_TSO_LEN_M	\
+	MAKEMASK(0x3FFFFULL, IDPF_TXD_CTX_QW1_TSO_LEN_S)
+#define IDPF_TXD_CTX_QW1_CMD_S		4
+#define IDPF_TXD_CTX_QW1_CMD_M		\
+	MAKEMASK(0xFFFUL, IDPF_TXD_CTX_QW1_CMD_S)
+#define IDPF_TXD_CTX_QW1_DTYPE_S	0
+#define IDPF_TXD_CTX_QW1_DTYPE_M	\
+	MAKEMASK(0xFUL, IDPF_TXD_CTX_QW1_DTYPE_S)
+#define IDPF_TXD_QW1_L2TAG1_S		48
+#define IDPF_TXD_QW1_L2TAG1_M		\
+	MAKEMASK(0xFFFFULL, IDPF_TXD_QW1_L2TAG1_S)
+#define IDPF_TXD_QW1_TX_BUF_SZ_S	34
+#define IDPF_TXD_QW1_TX_BUF_SZ_M	\
+	MAKEMASK(0x3FFFULL, IDPF_TXD_QW1_TX_BUF_SZ_S)
+#define IDPF_TXD_QW1_OFFSET_S		16
+#define IDPF_TXD_QW1_OFFSET_M		\
+	MAKEMASK(0x3FFFFULL, IDPF_TXD_QW1_OFFSET_S)
+#define IDPF_TXD_QW1_CMD_S		4
+#define IDPF_TXD_QW1_CMD_M		MAKEMASK(0xFFFUL, IDPF_TXD_QW1_CMD_S)
+#define IDPF_TXD_QW1_DTYPE_S		0
+#define IDPF_TXD_QW1_DTYPE_M		MAKEMASK(0xFUL, IDPF_TXD_QW1_DTYPE_S)
+
+/* TX Completion Descriptor Completion Types */
+#define IDPF_TXD_COMPLT_ITR_FLUSH	0
+#define IDPF_TXD_COMPLT_RULE_MISS	1
+#define IDPF_TXD_COMPLT_RS		2
+#define IDPF_TXD_COMPLT_REINJECTED	3
+#define IDPF_TXD_COMPLT_RE		4
+#define IDPF_TXD_COMPLT_SW_MARKER	5
+
+enum idpf_tx_desc_dtype_value {
+	IDPF_TX_DESC_DTYPE_DATA				= 0,
+	IDPF_TX_DESC_DTYPE_CTX				= 1,
+	IDPF_TX_DESC_DTYPE_REINJECT_CTX			= 2,
+	IDPF_TX_DESC_DTYPE_FLEX_DATA			= 3,
+	IDPF_TX_DESC_DTYPE_FLEX_CTX			= 4,
+	IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX			= 5,
+	IDPF_TX_DESC_DTYPE_FLEX_TSYN_L2TAG1		= 6,
+	IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2		= 7,
+	IDPF_TX_DESC_DTYPE_FLEX_TSO_L2TAG2_PARSTAG_CTX	= 8,
+	IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_SA_TSO_CTX	= 9,
+	IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_SA_CTX	= 10,
+	IDPF_TX_DESC_DTYPE_FLEX_L2TAG2_CTX		= 11,
+	IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE		= 12,
+	IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_TSO_CTX	= 13,
+	IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_CTX		= 14,
+	/* DESC_DONE - HW has completed write-back of descriptor */
+	IDPF_TX_DESC_DTYPE_DESC_DONE			= 15,
+};
+
+enum idpf_tx_ctx_desc_cmd_bits {
+	IDPF_TX_CTX_DESC_TSO		= 0x01,
+	IDPF_TX_CTX_DESC_TSYN		= 0x02,
+	IDPF_TX_CTX_DESC_IL2TAG2	= 0x04,
+	IDPF_TX_CTX_DESC_RSVD		= 0x08,
+	IDPF_TX_CTX_DESC_SWTCH_NOTAG	= 0x00,
+	IDPF_TX_CTX_DESC_SWTCH_UPLINK	= 0x10,
+	IDPF_TX_CTX_DESC_SWTCH_LOCAL	= 0x20,
+	IDPF_TX_CTX_DESC_SWTCH_VSI	= 0x30,
+	IDPF_TX_CTX_DESC_FILT_AU_EN	= 0x40,
+	IDPF_TX_CTX_DESC_FILT_AU_EVICT	= 0x80,
+	IDPF_TX_CTX_DESC_RSVD1		= 0xF00
+};
+
+enum idpf_tx_desc_len_fields {
+	/* Note: These are predefined bit offsets */
+	IDPF_TX_DESC_LEN_MACLEN_S	= 0, /* 7 BITS */
+	IDPF_TX_DESC_LEN_IPLEN_S	= 7, /* 7 BITS */
+	IDPF_TX_DESC_LEN_L4_LEN_S	= 14 /* 4 BITS */
+};
+
+#define IDPF_TXD_QW1_MACLEN_M MAKEMASK(0x7FUL, IDPF_TX_DESC_LEN_MACLEN_S)
+#define IDPF_TXD_QW1_IPLEN_M  MAKEMASK(0x7FUL, IDPF_TX_DESC_LEN_IPLEN_S)
+#define IDPF_TXD_QW1_L4LEN_M  MAKEMASK(0xFUL, IDPF_TX_DESC_LEN_L4_LEN_S)
+#define IDPF_TXD_QW1_FCLEN_M  MAKEMASK(0xFUL, IDPF_TX_DESC_LEN_L4_LEN_S)
+
+enum idpf_tx_base_desc_cmd_bits {
+	IDPF_TX_DESC_CMD_EOP			= 0x0001,
+	IDPF_TX_DESC_CMD_RS			= 0x0002,
+	 /* only on VFs else RSVD */
+	IDPF_TX_DESC_CMD_ICRC			= 0x0004,
+	IDPF_TX_DESC_CMD_IL2TAG1		= 0x0008,
+	IDPF_TX_DESC_CMD_RSVD1			= 0x0010,
+	IDPF_TX_DESC_CMD_IIPT_NONIP		= 0x0000, /* 2 BITS */
+	IDPF_TX_DESC_CMD_IIPT_IPV6		= 0x0020, /* 2 BITS */
+	IDPF_TX_DESC_CMD_IIPT_IPV4		= 0x0040, /* 2 BITS */
+	IDPF_TX_DESC_CMD_IIPT_IPV4_CSUM		= 0x0060, /* 2 BITS */
+	IDPF_TX_DESC_CMD_RSVD2			= 0x0080,
+	IDPF_TX_DESC_CMD_L4T_EOFT_UNK		= 0x0000, /* 2 BITS */
+	IDPF_TX_DESC_CMD_L4T_EOFT_TCP		= 0x0100, /* 2 BITS */
+	IDPF_TX_DESC_CMD_L4T_EOFT_SCTP		= 0x0200, /* 2 BITS */
+	IDPF_TX_DESC_CMD_L4T_EOFT_UDP		= 0x0300, /* 2 BITS */
+	IDPF_TX_DESC_CMD_RSVD3			= 0x0400,
+	IDPF_TX_DESC_CMD_RSVD4			= 0x0800,
+};
+
+/* Transmit descriptors  */
+/* splitq tx buf, singleq tx buf and singleq compl desc */
+struct idpf_base_tx_desc {
+	__le64 buf_addr; /* Address of descriptor's data buf */
+	__le64 qw1; /* type_cmd_offset_bsz_l2tag1 */
+};/* read used with buffer queues*/
+
+struct idpf_splitq_tx_compl_desc {
+	/* qid=[10:0] comptype=[13:11] rsvd=[14] gen=[15] */
+	__le16 qid_comptype_gen;
+	union {
+		__le16 q_head; /* Queue head */
+		__le16 compl_tag; /* Completion tag */
+	} q_head_compl_tag;
+	u32 rsvd;
+
+};/* writeback used with completion queues*/
+
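+/* Usage sketch (illustrative only, not part of the hardware definition):
+ * decoding the qid/comptype/gen fields of a completion descriptor with the
+ * IDPF_TXD_COMPLQ_* masks above; "desc" is a hypothetical descriptor pointer.
+ *
+ *	u16 qcg = LE16_TO_CPU(desc->qid_comptype_gen);
+ *	u16 qid = qcg & IDPF_TXD_COMPLQ_QID_M;
+ *	u16 ctype = (qcg & IDPF_TXD_COMPLQ_COMPL_TYPE_M) >>
+ *		    IDPF_TXD_COMPLQ_COMPL_TYPE_S;
+ *	bool gen = !!(qcg & IDPF_TXD_COMPLQ_GEN_M);
+ */
+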
+/* Context descriptors */
+struct idpf_base_tx_ctx_desc {
+	struct {
+		__le32 tunneling_params;
+		__le16 l2tag2;
+		__le16 rsvd1;
+	} qw0;
+	__le64 qw1; /* type_cmd_tlen_mss/rt_hint */
+};
+
+/* Common cmd field defines for all desc except Flex Flow Scheduler (0x0C) */
+enum idpf_tx_flex_desc_cmd_bits {
+	IDPF_TX_FLEX_DESC_CMD_EOP			= 0x01,
+	IDPF_TX_FLEX_DESC_CMD_RS			= 0x02,
+	IDPF_TX_FLEX_DESC_CMD_RE			= 0x04,
+	IDPF_TX_FLEX_DESC_CMD_IL2TAG1			= 0x08,
+	IDPF_TX_FLEX_DESC_CMD_DUMMY			= 0x10,
+	IDPF_TX_FLEX_DESC_CMD_CS_EN			= 0x20,
+	IDPF_TX_FLEX_DESC_CMD_FILT_AU_EN		= 0x40,
+	IDPF_TX_FLEX_DESC_CMD_FILT_AU_EVICT		= 0x80,
+};
+
+struct idpf_flex_tx_desc {
+	__le64 buf_addr;	/* Packet buffer address */
+	struct {
+		__le16 cmd_dtype;
+#define IDPF_FLEX_TXD_QW1_DTYPE_S		0
+#define IDPF_FLEX_TXD_QW1_DTYPE_M		\
+		MAKEMASK(0x1FUL, IDPF_FLEX_TXD_QW1_DTYPE_S)
+#define IDPF_FLEX_TXD_QW1_CMD_S		5
+#define IDPF_FLEX_TXD_QW1_CMD_M		MAKEMASK(0x7FFUL, IDPF_FLEX_TXD_QW1_CMD_S)
+		union {
+			/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_DATA_(0x03) */
+			u8 raw[4];
+
+			/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSYN_L2TAG1 (0x06) */
+			struct {
+				__le16 l2tag1;
+				u8 flex;
+				u8 tsync;
+			} tsync;
+
+			/* DTYPE=IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 (0x07) */
+			struct {
+				__le16 l2tag1;
+				__le16 l2tag2;
+			} l2tags;
+		} flex;
+		__le16 buf_size;
+	} qw1;
+};
+
+struct idpf_flex_tx_sched_desc {
+	__le64 buf_addr;	/* Packet buffer address */
+
+	/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE (0x0C) */
+	struct {
+		u8 cmd_dtype;
+#define IDPF_TXD_FLEX_FLOW_DTYPE_M	0x1F
+#define IDPF_TXD_FLEX_FLOW_CMD_EOP	0x20
+#define IDPF_TXD_FLEX_FLOW_CMD_CS_EN	0x40
+#define IDPF_TXD_FLEX_FLOW_CMD_RE	0x80
+
+		u8 rsvd[3];
+
+		__le16 compl_tag;
+		__le16 rxr_bufsize;
+#define IDPF_TXD_FLEX_FLOW_RXR		0x4000
+#define IDPF_TXD_FLEX_FLOW_BUFSIZE_M	0x3FFF
+	} qw1;
+};
+
+/* Common cmd fields for all flex context descriptors
+ * Note: these defines already account for the 5 bit dtype in the cmd_dtype
+ * field
+ */
+enum idpf_tx_flex_ctx_desc_cmd_bits {
+	IDPF_TX_FLEX_CTX_DESC_CMD_TSO			= 0x0020,
+	IDPF_TX_FLEX_CTX_DESC_CMD_TSYN_EN		= 0x0040,
+	IDPF_TX_FLEX_CTX_DESC_CMD_L2TAG2		= 0x0080,
+	IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_UPLNK		= 0x0200, /* 2 bits */
+	IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_LOCAL		= 0x0400, /* 2 bits */
+	IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_TARGETVSI	= 0x0600, /* 2 bits */
+};
+
+/* Standard flex descriptor TSO context quad word */
+struct idpf_flex_tx_tso_ctx_qw {
+	__le32 flex_tlen;
+#define IDPF_TXD_FLEX_CTX_TLEN_M	0x3FFFF
+#define IDPF_TXD_FLEX_TSO_CTX_FLEX_S	24
+	__le16 mss_rt;
+#define IDPF_TXD_FLEX_CTX_MSS_RT_M	0x3FFF
+	u8 hdr_len;
+	u8 flex;
+};
+
+union idpf_flex_tx_ctx_desc {
+	/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_CTX (0x04) */
+	struct {
+		u8 qw0_flex[8];
+		struct {
+			__le16 cmd_dtype;
+			__le16 l2tag1;
+			u8 qw1_flex[4];
+		} qw1;
+	} gen;
+
+	/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */
+	struct {
+		struct idpf_flex_tx_tso_ctx_qw qw0;
+		struct {
+			__le16 cmd_dtype;
+			u8 flex[6];
+		} qw1;
+	} tso;
+
+	/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_L2TAG2_PARSTAG_CTX (0x08) */
+	struct {
+		struct idpf_flex_tx_tso_ctx_qw qw0;
+		struct {
+			__le16 cmd_dtype;
+			__le16 l2tag2;
+			u8 flex0;
+			u8 ptag;
+			u8 flex1[2];
+		} qw1;
+	} tso_l2tag2_ptag;
+
+	/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_L2TAG2_CTX (0x0B) */
+	struct {
+		u8 qw0_flex[8];
+		struct {
+			__le16 cmd_dtype;
+			__le16 l2tag2;
+			u8 flex[4];
+		} qw1;
+	} l2tag2;
+
+	/* DTYPE = IDPF_TX_DESC_DTYPE_REINJECT_CTX (0x02) */
+	struct {
+		struct {
+			__le32 sa_domain;
+#define IDPF_TXD_FLEX_CTX_SA_DOM_M	0xFFFF
+#define IDPF_TXD_FLEX_CTX_SA_DOM_VAL	0x10000
+			__le32 sa_idx;
+#define IDPF_TXD_FLEX_CTX_SAIDX_M	0x1FFFFF
+		} qw0;
+		struct {
+			__le16 cmd_dtype;
+			__le16 txr2comp;
+#define IDPF_TXD_FLEX_CTX_TXR2COMP	0x1
+			__le16 miss_txq_comp_tag;
+			__le16 miss_txq_id;
+		} qw1;
+	} reinjection_pkt;
+};
+
+/* Host Split Context Descriptors */
+struct idpf_flex_tx_hs_ctx_desc {
+	union {
+		struct {
+			__le32 host_fnum_tlen;
+#define IDPF_TXD_FLEX_CTX_TLEN_S	0
+/* see IDPF_TXD_FLEX_CTX_TLEN_M for mask definition */
+#define IDPF_TXD_FLEX_CTX_FNUM_S	18
+#define IDPF_TXD_FLEX_CTX_FNUM_M	0x7FF
+#define IDPF_TXD_FLEX_CTX_HOST_S	29
+#define IDPF_TXD_FLEX_CTX_HOST_M	0x7
+			__le16 ftype_mss_rt;
+#define IDPF_TXD_FLEX_CTX_MSS_RT_0	0
+#define IDPF_TXD_FLEX_CTX_MSS_RT_M	0x3FFF
+#define IDPF_TXD_FLEX_CTX_FTYPE_S	14
+#define IDPF_TXD_FLEX_CTX_FTYPE_VF	MAKEMASK(0x0, IDPF_TXD_FLEX_CTX_FTYPE_S)
+#define IDPF_TXD_FLEX_CTX_FTYPE_VDEV	MAKEMASK(0x1, IDPF_TXD_FLEX_CTX_FTYPE_S)
+#define IDPF_TXD_FLEX_CTX_FTYPE_PF	MAKEMASK(0x2, IDPF_TXD_FLEX_CTX_FTYPE_S)
+			u8 hdr_len;
+			u8 ptag;
+		} tso;
+		struct {
+			u8 flex0[2];
+			__le16 host_fnum_ftype;
+			u8 flex1[3];
+			u8 ptag;
+		} no_tso;
+	} qw0;
+
+	__le64 qw1_cmd_dtype;
+#define IDPF_TXD_FLEX_CTX_QW1_PASID_S		16
+#define IDPF_TXD_FLEX_CTX_QW1_PASID_M		0xFFFFF
+#define IDPF_TXD_FLEX_CTX_QW1_PASID_VALID_S	36
+#define IDPF_TXD_FLEX_CTX_QW1_PASID_VALID	\
+		MAKEMASK(0x1, IDPF_TXD_FLEX_CTX_QW1_PASID_VALID_S)
+#define IDPF_TXD_FLEX_CTX_QW1_TPH_S		37
+#define IDPF_TXD_FLEX_CTX_QW1_TPH \
+		MAKEMASK(0x1, IDPF_TXD_FLEX_CTX_QW1_TPH_S)
+#define IDPF_TXD_FLEX_CTX_QW1_PFNUM_S		38
+#define IDPF_TXD_FLEX_CTX_QW1_PFNUM_M		0xF
+/* The following are only valid for DTYPE = 0x09 and DTYPE = 0x0A */
+#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_S		42
+#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_M		0x1FFFFF
+#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_VAL_S	63
+#define IDPF_TXD_FLEX_CTX_QW1_SAIDX_VALID	\
+		MAKEMASK(0x1, IDPF_TXD_FLEX_CTX_QW1_SAIDX_VAL_S)
+/* The following are only valid for DTYPE = 0x0D and DTYPE = 0x0E */
+#define IDPF_TXD_FLEX_CTX_QW1_FLEX0_S		48
+#define IDPF_TXD_FLEX_CTX_QW1_FLEX0_M		0xFF
+#define IDPF_TXD_FLEX_CTX_QW1_FLEX1_S		56
+#define IDPF_TXD_FLEX_CTX_QW1_FLEX1_M		0xFF
+};
+#endif /* _IDPF_LAN_TXRX_H_ */
diff --git a/drivers/common/idpf/idpf_lan_vf_regs.h b/drivers/common/idpf/idpf_lan_vf_regs.h
new file mode 100644
index 0000000000..9cd4f757d9
--- /dev/null
+++ b/drivers/common/idpf/idpf_lan_vf_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#ifndef _IDPF_LAN_VF_REGS_H_
+#define _IDPF_LAN_VF_REGS_H_
+
+
+/* Reset */
+#define VFGEN_RSTAT			0x00008800
+#define VFGEN_RSTAT_VFR_STATE_S		0
+#define VFGEN_RSTAT_VFR_STATE_M		MAKEMASK(0x3, VFGEN_RSTAT_VFR_STATE_S)
+
+/* Control(VF Mailbox) Queue */
+#define VF_BASE				0x00006000
+
+#define VF_ATQBAL			(VF_BASE + 0x1C00)
+#define VF_ATQBAH			(VF_BASE + 0x1800)
+#define VF_ATQLEN			(VF_BASE + 0x0800)
+#define VF_ATQLEN_ATQLEN_S		0
+#define VF_ATQLEN_ATQLEN_M		MAKEMASK(0x3FF, VF_ATQLEN_ATQLEN_S)
+#define VF_ATQLEN_ATQVFE_S		28
+#define VF_ATQLEN_ATQVFE_M		BIT(VF_ATQLEN_ATQVFE_S)
+#define VF_ATQLEN_ATQOVFL_S		29
+#define VF_ATQLEN_ATQOVFL_M		BIT(VF_ATQLEN_ATQOVFL_S)
+#define VF_ATQLEN_ATQCRIT_S		30
+#define VF_ATQLEN_ATQCRIT_M		BIT(VF_ATQLEN_ATQCRIT_S)
+#define VF_ATQLEN_ATQENABLE_S		31
+#define VF_ATQLEN_ATQENABLE_M		BIT(VF_ATQLEN_ATQENABLE_S)
+#define VF_ATQH				(VF_BASE + 0x0400)
+#define VF_ATQH_ATQH_S			0
+#define VF_ATQH_ATQH_M			MAKEMASK(0x3FF, VF_ATQH_ATQH_S)
+#define VF_ATQT				(VF_BASE + 0x2400)
+
+#define VF_ARQBAL			(VF_BASE + 0x0C00)
+#define VF_ARQBAH			(VF_BASE)
+#define VF_ARQLEN			(VF_BASE + 0x2000)
+#define VF_ARQLEN_ARQLEN_S		0
+#define VF_ARQLEN_ARQLEN_M		MAKEMASK(0x3FF, VF_ARQLEN_ARQLEN_S)
+#define VF_ARQLEN_ARQVFE_S		28
+#define VF_ARQLEN_ARQVFE_M		BIT(VF_ARQLEN_ARQVFE_S)
+#define VF_ARQLEN_ARQOVFL_S		29
+#define VF_ARQLEN_ARQOVFL_M		BIT(VF_ARQLEN_ARQOVFL_S)
+#define VF_ARQLEN_ARQCRIT_S		30
+#define VF_ARQLEN_ARQCRIT_M		BIT(VF_ARQLEN_ARQCRIT_S)
+#define VF_ARQLEN_ARQENABLE_S		31
+#define VF_ARQLEN_ARQENABLE_M		BIT(VF_ARQLEN_ARQENABLE_S)
+#define VF_ARQH				(VF_BASE + 0x1400)
+#define VF_ARQH_ARQH_S			0
+#define VF_ARQH_ARQH_M			MAKEMASK(0x1FFF, VF_ARQH_ARQH_S)
+#define VF_ARQT				(VF_BASE + 0x1000)
+
+/* Transmit queues */
+#define VF_QTX_TAIL_BASE		0x00000000
+#define VF_QTX_TAIL(_QTX)		(VF_QTX_TAIL_BASE + (_QTX) * 0x4)
+#define VF_QTX_TAIL_EXT_BASE		0x00040000
+#define VF_QTX_TAIL_EXT(_QTX)		(VF_QTX_TAIL_EXT_BASE + ((_QTX) * 4))
+
+/* Receive queues */
+#define VF_QRX_TAIL_BASE		0x00002000
+#define VF_QRX_TAIL(_QRX)		(VF_QRX_TAIL_BASE + ((_QRX) * 4))
+#define VF_QRX_TAIL_EXT_BASE		0x00050000
+#define VF_QRX_TAIL_EXT(_QRX)		(VF_QRX_TAIL_EXT_BASE + ((_QRX) * 4))
+#define VF_QRXB_TAIL_BASE		0x00060000
+#define VF_QRXB_TAIL(_QRX)		(VF_QRXB_TAIL_BASE + ((_QRX) * 4))
+
+/* Interrupts */
+#define VF_INT_DYN_CTL0			0x00005C00
+#define VF_INT_DYN_CTL0_INTENA_S	0
+#define VF_INT_DYN_CTL0_INTENA_M	BIT(VF_INT_DYN_CTL0_INTENA_S)
+#define VF_INT_DYN_CTL0_ITR_INDX_S	3
+#define VF_INT_DYN_CTL0_ITR_INDX_M	MAKEMASK(0x3, VF_INT_DYN_CTL0_ITR_INDX_S)
+#define VF_INT_DYN_CTLN(_INT)		(0x00003800 + ((_INT) * 4))
+#define VF_INT_DYN_CTLN_EXT(_INT)	(0x00070000 + ((_INT) * 4))
+#define VF_INT_DYN_CTLN_INTENA_S	0
+#define VF_INT_DYN_CTLN_INTENA_M	BIT(VF_INT_DYN_CTLN_INTENA_S)
+#define VF_INT_DYN_CTLN_CLEARPBA_S	1
+#define VF_INT_DYN_CTLN_CLEARPBA_M	BIT(VF_INT_DYN_CTLN_CLEARPBA_S)
+#define VF_INT_DYN_CTLN_SWINT_TRIG_S	2
+#define VF_INT_DYN_CTLN_SWINT_TRIG_M	BIT(VF_INT_DYN_CTLN_SWINT_TRIG_S)
+#define VF_INT_DYN_CTLN_ITR_INDX_S	3
+#define VF_INT_DYN_CTLN_ITR_INDX_M	MAKEMASK(0x3, VF_INT_DYN_CTLN_ITR_INDX_S)
+#define VF_INT_DYN_CTLN_INTERVAL_S	5
+#define VF_INT_DYN_CTLN_INTERVAL_M	BIT(VF_INT_DYN_CTLN_INTERVAL_S)
+#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S	24
+#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M	BIT(VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S)
+#define VF_INT_DYN_CTLN_SW_ITR_INDX_S	25
+#define VF_INT_DYN_CTLN_SW_ITR_INDX_M	BIT(VF_INT_DYN_CTLN_SW_ITR_INDX_S)
+#define VF_INT_DYN_CTLN_WB_ON_ITR_S	30
+#define VF_INT_DYN_CTLN_WB_ON_ITR_M	BIT(VF_INT_DYN_CTLN_WB_ON_ITR_S)
+#define VF_INT_DYN_CTLN_INTENA_MSK_S	31
+#define VF_INT_DYN_CTLN_INTENA_MSK_M	BIT(VF_INT_DYN_CTLN_INTENA_MSK_S)
+#define VF_INT_ITR0(_i)			(0x00004C00 + ((_i) * 4))
+#define VF_INT_ITRN_V2(_i, _reg_start)	((_reg_start) + (((_i)) * 4))
+#define VF_INT_ITRN(_i, _INT)		(0x00002800 + ((_i) * 4) + ((_INT) * 0x40))
+#define VF_INT_ITRN_64(_i, _INT)	(0x00002C00 + ((_i) * 4) + ((_INT) * 0x100))
+#define VF_INT_ITRN_2K(_i, _INT)	(0x00072000 + ((_i) * 4) + ((_INT) * 0x100))
+#define VF_INT_ITRN_MAX_INDEX		2
+#define VF_INT_ITRN_INTERVAL_S		0
+#define VF_INT_ITRN_INTERVAL_M		MAKEMASK(0xFFF, VF_INT_ITRN_INTERVAL_S)
+#define VF_INT_PBA_CLEAR		0x00008900
+
+#define VF_INT_ICR0_ENA1		0x00005000
+#define VF_INT_ICR0_ENA1_ADMINQ_S	30
+#define VF_INT_ICR0_ENA1_ADMINQ_M	BIT(VF_INT_ICR0_ENA1_ADMINQ_S)
+#define VF_INT_ICR0_ENA1_RSVD_S		31
+#define VF_INT_ICR01			0x00004800
+#define VF_QF_HENA(_i)			(0x0000C400 + ((_i) * 4))
+#define VF_QF_HENA_MAX_INDX		1
+#define VF_QF_HKEY(_i)			(0x0000CC00 + ((_i) * 4))
+#define VF_QF_HKEY_MAX_INDX		12
+#define VF_QF_HLUT(_i)			(0x0000D000 + ((_i) * 4))
+#define VF_QF_HLUT_MAX_INDX		15
+#endif
diff --git a/drivers/common/idpf/idpf_osdep.h b/drivers/common/idpf/idpf_osdep.h
new file mode 100644
index 0000000000..ee64884a97
--- /dev/null
+++ b/drivers/common/idpf/idpf_osdep.h
@@ -0,0 +1,374 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#ifndef _IDPF_OSDEP_H_
+#define _IDPF_OSDEP_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_memcpy.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_random.h>
+#include <rte_io.h>
+
+#define INLINE inline
+#define STATIC static
+
+typedef uint8_t		u8;
+typedef int8_t		s8;
+typedef uint16_t	u16;
+typedef int16_t		s16;
+typedef uint32_t	u32;
+typedef int32_t		s32;
+typedef uint64_t	u64;
+typedef int64_t		s64;
+
+typedef struct idpf_lock idpf_lock;
+
+#define __iomem
+#define hw_dbg(hw, S, A...)	do {} while (0)
+#define upper_32_bits(n)	((u32)(((n) >> 16) >> 16))
+#define lower_32_bits(n)	((u32)(n))
+#define low_16_bits(x)		((x) & 0xFFFF)
+#define high_16_bits(x)		(((x) & 0xFFFF0000) >> 16)
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN		6
+#endif
+
+#ifndef __le16
+#define __le16	uint16_t
+#endif
+#ifndef __le32
+#define __le32	uint32_t
+#endif
+#ifndef __le64
+#define __le64	uint64_t
+#endif
+#ifndef __be16
+#define __be16	uint16_t
+#endif
+#ifndef __be32
+#define __be32	uint32_t
+#endif
+#ifndef __be64
+#define __be64	uint64_t
+#endif
+
+#ifndef __always_unused
+#define __always_unused  __attribute__((__unused__))
+#endif
+#ifndef __maybe_unused
+#define __maybe_unused  __attribute__((__unused__))
+#endif
+#ifndef __packed
+#define __packed  __attribute__((packed))
+#endif
+
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif
+
+#ifndef BIT
+#define BIT(a) (1ULL << (a))
+#endif
+
+#define FALSE	0
+#define TRUE	1
+#define false	0
+#define true	1
+
+/* Avoid macro redefinition warning on Windows */
+#ifdef RTE_EXEC_ENV_WINDOWS
+#ifdef min
+#undef min
+#endif
+#ifdef max
+#undef max
+#endif
+#endif
+
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
+
+#define ARRAY_SIZE(arr)  RTE_DIM(arr)
+#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))
+#define MAKEMASK(m, s) ((m) << (s))
+
+extern int idpf_common_logger;
+
+#define DEBUGOUT(S)		rte_log(RTE_LOG_DEBUG, idpf_common_logger, S)
+#define DEBUGOUT2(S, A...)	rte_log(RTE_LOG_DEBUG, idpf_common_logger, S, ##A)
+#define DEBUGFUNC(F)		DEBUGOUT(F "\n")
+
+#define idpf_debug(h, m, s, ...)					\
+	do {								\
+		if (((m) & (h)->debug_mask))				\
+			PMD_DRV_LOG_RAW(DEBUG, "idpf %02x.%x " s,       \
+					(h)->bus.device, (h)->bus.func,	\
+					##__VA_ARGS__);			\
+	} while (0)
+
+#define idpf_info(hw, fmt, args...) idpf_debug(hw, IDPF_DBG_ALL, fmt, ##args)
+#define idpf_warn(hw, fmt, args...) idpf_debug(hw, IDPF_DBG_ALL, fmt, ##args)
+#define idpf_debug_array(hw, type, rowsize, groupsize, buf, len)	\
+	do {								\
+		struct idpf_hw *hw_l = hw;				\
+		u16 len_l = len;					\
+		u8 *buf_l = buf;					\
+		int i;							\
+		for (i = 0; i < len_l; i += 8)				\
+			idpf_debug(hw_l, type,				\
+				   "0x%04X  0x%016"PRIx64"\n",		\
+				   i, *((u64 *)((buf_l) + i)));		\
+	} while (0)
+#define idpf_snprintf snprintf
+#ifndef SNPRINTF
+#define SNPRINTF idpf_snprintf
+#endif
+
+#define IDPF_PCI_REG(reg)     rte_read32(reg)
+#define IDPF_PCI_REG_ADDR(a, reg)				\
+	((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
+#define IDPF_PCI_REG64(reg)     rte_read64(reg)
+#define IDPF_PCI_REG_ADDR64(a, reg)				\
+	((volatile uint64_t *)((char *)(a)->hw_addr + (reg)))
+
+#define idpf_wmb() rte_io_wmb()
+#define idpf_rmb() rte_io_rmb()
+#define idpf_mb() rte_io_mb()
+
+static inline uint32_t idpf_read_addr(volatile void *addr)
+{
+	return rte_le_to_cpu_32(IDPF_PCI_REG(addr));
+}
+
+static inline uint64_t idpf_read_addr64(volatile void *addr)
+{
+	return rte_le_to_cpu_64(IDPF_PCI_REG64(addr));
+}
+
+#define IDPF_PCI_REG_WRITE(reg, value)			\
+	rte_write32((rte_cpu_to_le_32(value)), reg)
+
+#define IDPF_PCI_REG_WRITE64(reg, value)		\
+	rte_write64((rte_cpu_to_le_64(value)), reg)
+
+#define IDPF_READ_REG(hw, reg) idpf_read_addr(IDPF_PCI_REG_ADDR((hw), (reg)))
+#define IDPF_WRITE_REG(hw, reg, value)					\
+	IDPF_PCI_REG_WRITE(IDPF_PCI_REG_ADDR((hw), (reg)), (value))
+
+#define rd32(a, reg) idpf_read_addr(IDPF_PCI_REG_ADDR((a), (reg)))
+#define wr32(a, reg, value)						\
+	IDPF_PCI_REG_WRITE(IDPF_PCI_REG_ADDR((a), (reg)), (value))
+#define div64_long(n, d) ((n) / (d))
+#define rd64(a, reg) idpf_read_addr64(IDPF_PCI_REG_ADDR64((a), (reg)))
+
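+/* Usage sketch (illustrative only): the register headers (e.g.
+ * idpf_lan_pf_regs.h) describe each field as a shift (_S) plus a mask (_M)
+ * built with MAKEMASK()/BIT(), so a read-modify-write of a field uses the
+ * rd32()/wr32() accessors above; "hw" and "ring_len" are hypothetical.
+ *
+ *	u32 val = rd32(hw, PF_FW_ARQLEN);
+ *
+ *	val &= ~PF_FW_ARQLEN_ARQLEN_M;
+ *	val |= (ring_len << PF_FW_ARQLEN_ARQLEN_S) & PF_FW_ARQLEN_ARQLEN_M;
+ *	val |= PF_FW_ARQLEN_ARQENABLE_M;
+ *	wr32(hw, PF_FW_ARQLEN, val);
+ */
+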
+#define BITS_PER_BYTE       8
+
+/* memory allocation tracking */
+struct idpf_dma_mem {
+	void *va;
+	u64 pa;
+	u32 size;
+	const void *zone;
+} __attribute__((packed));
+
+struct idpf_virt_mem {
+	void *va;
+	u32 size;
+} __attribute__((packed));
+
+#define idpf_malloc(h, s)	rte_zmalloc(NULL, s, 0)
+#define idpf_calloc(h, c, s)	rte_zmalloc(NULL, (c) * (s), 0)
+#define idpf_free(h, m)		rte_free(m)
+
+#define idpf_memset(a, b, c, d)	memset((a), (b), (c))
+#define idpf_memcpy(a, b, c, d)	rte_memcpy((a), (b), (c))
+#define idpf_memdup(a, b, c, d)	rte_memcpy(idpf_malloc(a, c), b, c)
+
+#define CPU_TO_BE16(o) rte_cpu_to_be_16(o)
+#define CPU_TO_BE32(o) rte_cpu_to_be_32(o)
+#define CPU_TO_BE64(o) rte_cpu_to_be_64(o)
+#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
+#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
+#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
+#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
+#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
+#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
+
+#define NTOHS(a) rte_be_to_cpu_16(a)
+#define NTOHL(a) rte_be_to_cpu_32(a)
+#define HTONS(a) rte_cpu_to_be_16(a)
+#define HTONL(a) rte_cpu_to_be_32(a)
+
+/* SW spinlock */
+struct idpf_lock {
+	rte_spinlock_t spinlock;
+};
+
+static inline void
+idpf_init_lock(struct idpf_lock *sp)
+{
+	rte_spinlock_init(&sp->spinlock);
+}
+
+static inline void
+idpf_acquire_lock(struct idpf_lock *sp)
+{
+	rte_spinlock_lock(&sp->spinlock);
+}
+
+static inline void
+idpf_release_lock(struct idpf_lock *sp)
+{
+	rte_spinlock_unlock(&sp->spinlock);
+}
+
+static inline void
+idpf_destroy_lock(__attribute__((unused)) struct idpf_lock *sp)
+{
+}
+
+struct idpf_hw;
+
+static inline void *
+idpf_alloc_dma_mem(__attribute__((unused)) struct idpf_hw *hw,
+		   struct idpf_dma_mem *mem, u64 size)
+{
+	const struct rte_memzone *mz = NULL;
+	char z_name[RTE_MEMZONE_NAMESIZE];
+
+	if (!mem)
+		return NULL;
+
+	snprintf(z_name, sizeof(z_name), "idpf_dma_%"PRIu64, rte_rand());
+	mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY,
+					 RTE_MEMZONE_IOVA_CONTIG, RTE_PGSIZE_4K);
+	if (!mz)
+		return NULL;
+
+	mem->size = size;
+	mem->va = mz->addr;
+	mem->pa = mz->iova;
+	mem->zone = (const void *)mz;
+	memset(mem->va, 0, size);
+
+	return mem->va;
+}
+
+static inline void
+idpf_free_dma_mem(__attribute__((unused)) struct idpf_hw *hw,
+		  struct idpf_dma_mem *mem)
+{
+	rte_memzone_free((const struct rte_memzone *)mem->zone);
+	mem->size = 0;
+	mem->va = NULL;
+	mem->pa = 0;
+}
+
+static inline u8
+idpf_hweight8(u32 num)
+{
+	u8 bits = 0;
+	u32 i;
+
+	for (i = 0; i < 8; i++) {
+		bits += (u8)(num & 0x1);
+		num >>= 1;
+	}
+
+	return bits;
+}
+
+static inline u8
+idpf_hweight32(u32 num)
+{
+	u8 bits = 0;
+	u32 i;
+
+	for (i = 0; i < 32; i++) {
+		bits += (u8)(num & 0x1);
+		num >>= 1;
+	}
+
+	return bits;
+}
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define DELAY(x) rte_delay_us(x)
+#define idpf_usec_delay(x) rte_delay_us(x)
+#define idpf_msec_delay(x, y) rte_delay_us(1000 * (x))
+#define udelay(x) DELAY(x)
+#define msleep(x) DELAY(1000 * (x))
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+#ifndef IDPF_DBG_TRACE
+#define IDPF_DBG_TRACE	  BIT_ULL(0)
+#endif
+
+#ifndef DIVIDE_AND_ROUND_UP
+#define DIVIDE_AND_ROUND_UP(a, b) (((a) + (b) - 1) / (b))
+#endif
+
+#ifndef IDPF_INTEL_VENDOR_ID
+#define IDPF_INTEL_VENDOR_ID	    0x8086
+#endif
+
+#ifndef IS_UNICAST_ETHER_ADDR
+#define IS_UNICAST_ETHER_ADDR(addr)			\
+	((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 0))
+#endif
+
+#ifndef IS_MULTICAST_ETHER_ADDR
+#define IS_MULTICAST_ETHER_ADDR(addr)			\
+	((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 1))
+#endif
+
+#ifndef IS_BROADCAST_ETHER_ADDR
+/* Check whether an address is broadcast. */
+#define IS_BROADCAST_ETHER_ADDR(addr)			\
+	((bool)((((u16 *)(addr))[0] == ((u16)0xffff))))
+#endif
+
+#ifndef IS_ZERO_ETHER_ADDR
+#define IS_ZERO_ETHER_ADDR(addr)				\
+	(((bool)((((u16 *)(addr))[0] == ((u16)0x0)))) &&	\
+	 ((bool)((((u16 *)(addr))[1] == ((u16)0x0)))) &&	\
+	 ((bool)((((u16 *)(addr))[2] == ((u16)0x0)))))
+#endif
+
+#ifndef LIST_HEAD_TYPE
+#define LIST_HEAD_TYPE(list_name, type) LIST_HEAD(list_name, type)
+#endif
+
+#ifndef LIST_ENTRY_TYPE
+#define LIST_ENTRY_TYPE(type)	   LIST_ENTRY(type)
+#endif
+
+#ifndef LIST_FOR_EACH_ENTRY_SAFE
+#define LIST_FOR_EACH_ENTRY_SAFE(pos, temp, head, entry_type, list)	\
+	LIST_FOREACH(pos, head, list)
+
+#endif
+
+#ifndef LIST_FOR_EACH_ENTRY
+#define LIST_FOR_EACH_ENTRY(pos, head, entry_type, list)		\
+	LIST_FOREACH(pos, head, list)
+
+#endif
+
+#endif /* _IDPF_OSDEP_H_ */
diff --git a/drivers/common/idpf/idpf_prototype.h b/drivers/common/idpf/idpf_prototype.h
new file mode 100644
index 0000000000..529b62212d
--- /dev/null
+++ b/drivers/common/idpf/idpf_prototype.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#ifndef _IDPF_PROTOTYPE_H_
+#define _IDPF_PROTOTYPE_H_
+
+/* Include generic macros and types first */
+#include "idpf_osdep.h"
+#include "idpf_controlq.h"
+#include "idpf_type.h"
+#include "idpf_alloc.h"
+#include "idpf_devids.h"
+#include "idpf_controlq_api.h"
+#include "idpf_lan_pf_regs.h"
+#include "idpf_lan_vf_regs.h"
+#include "idpf_lan_txrx.h"
+#include "virtchnl.h"
+
+#define APF
+
+int idpf_init_hw(struct idpf_hw *hw, struct idpf_ctlq_size ctlq_size);
+int idpf_deinit_hw(struct idpf_hw *hw);
+
+int idpf_clean_arq_element(struct idpf_hw *hw,
+			   struct idpf_arq_event_info *e,
+			   u16 *events_pending);
+bool idpf_asq_done(struct idpf_hw *hw);
+bool idpf_check_asq_alive(struct idpf_hw *hw);
+
+int idpf_get_rss_lut(struct idpf_hw *hw, u16 seid, bool pf_lut,
+		     u8 *lut, u16 lut_size);
+int idpf_set_rss_lut(struct idpf_hw *hw, u16 seid, bool pf_lut,
+		     u8 *lut, u16 lut_size);
+int idpf_get_rss_key(struct idpf_hw *hw, u16 seid,
+		     struct idpf_get_set_rss_key_data *key);
+int idpf_set_rss_key(struct idpf_hw *hw, u16 seid,
+		     struct idpf_get_set_rss_key_data *key);
+
+int idpf_set_mac_type(struct idpf_hw *hw);
+
+int idpf_reset(struct idpf_hw *hw);
+int idpf_send_msg_to_cp(struct idpf_hw *hw, enum virtchnl_ops v_opcode,
+			int v_retval, u8 *msg, u16 msglen);
+#endif /* _IDPF_PROTOTYPE_H_ */
diff --git a/drivers/common/idpf/idpf_type.h b/drivers/common/idpf/idpf_type.h
new file mode 100644
index 0000000000..3b46536287
--- /dev/null
+++ b/drivers/common/idpf/idpf_type.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#ifndef _IDPF_TYPE_H_
+#define _IDPF_TYPE_H_
+
+#include "idpf_controlq.h"
+
+#define UNREFERENCED_XPARAMETER
+#define UNREFERENCED_1PARAMETER(_p)
+#define UNREFERENCED_2PARAMETER(_p, _q)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r)
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t)
+
+#define MAKEMASK(m, s)	((m) << (s))
+
+struct idpf_eth_stats {
+	u64 rx_bytes;			/* gorc */
+	u64 rx_unicast;			/* uprc */
+	u64 rx_multicast;		/* mprc */
+	u64 rx_broadcast;		/* bprc */
+	u64 rx_discards;		/* rdpc */
+	u64 rx_unknown_protocol;	/* rupp */
+	u64 tx_bytes;			/* gotc */
+	u64 tx_unicast;			/* uptc */
+	u64 tx_multicast;		/* mptc */
+	u64 tx_broadcast;		/* bptc */
+	u64 tx_discards;		/* tdpc */
+	u64 tx_errors;			/* tepc */
+};
+
+/* Statistics collected by the MAC */
+struct idpf_hw_port_stats {
+	/* eth stats collected by the port */
+	struct idpf_eth_stats eth;
+
+	/* additional port specific stats */
+	u64 tx_dropped_link_down;	/* tdold */
+	u64 crc_errors;			/* crcerrs */
+	u64 illegal_bytes;		/* illerrc */
+	u64 error_bytes;		/* errbc */
+	u64 mac_local_faults;		/* mlfc */
+	u64 mac_remote_faults;		/* mrfc */
+	u64 rx_length_errors;		/* rlec */
+	u64 link_xon_rx;		/* lxonrxc */
+	u64 link_xoff_rx;		/* lxoffrxc */
+	u64 priority_xon_rx[8];		/* pxonrxc[8] */
+	u64 priority_xoff_rx[8];	/* pxoffrxc[8] */
+	u64 link_xon_tx;		/* lxontxc */
+	u64 link_xoff_tx;		/* lxofftxc */
+	u64 priority_xon_tx[8];		/* pxontxc[8] */
+	u64 priority_xoff_tx[8];	/* pxofftxc[8] */
+	u64 priority_xon_2_xoff[8];	/* pxon2offc[8] */
+	u64 rx_size_64;			/* prc64 */
+	u64 rx_size_127;		/* prc127 */
+	u64 rx_size_255;		/* prc255 */
+	u64 rx_size_511;		/* prc511 */
+	u64 rx_size_1023;		/* prc1023 */
+	u64 rx_size_1522;		/* prc1522 */
+	u64 rx_size_big;		/* prc9522 */
+	u64 rx_undersize;		/* ruc */
+	u64 rx_fragments;		/* rfc */
+	u64 rx_oversize;		/* roc */
+	u64 rx_jabber;			/* rjc */
+	u64 tx_size_64;			/* ptc64 */
+	u64 tx_size_127;		/* ptc127 */
+	u64 tx_size_255;		/* ptc255 */
+	u64 tx_size_511;		/* ptc511 */
+	u64 tx_size_1023;		/* ptc1023 */
+	u64 tx_size_1522;		/* ptc1522 */
+	u64 tx_size_big;		/* ptc9522 */
+	u64 mac_short_packet_dropped;	/* mspdc */
+	u64 checksum_error;		/* xec */
+};
+/* Static buffer size to initialize control queue */
+struct idpf_ctlq_size {
+	u16 asq_buf_size;
+	u16 asq_ring_size;
+	u16 arq_buf_size;
+	u16 arq_ring_size;
+};
+
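+/* Usage sketch (illustrative only): a caller fills this structure with its
+ * mailbox sizing before calling idpf_init_hw() (declared in
+ * idpf_prototype.h); the sizes below are made-up example values.
+ *
+ *	struct idpf_ctlq_size ctlq_size = {
+ *		.asq_buf_size = 4096, .asq_ring_size = 64,
+ *		.arq_buf_size = 4096, .arq_ring_size = 64,
+ *	};
+ *	ret = idpf_init_hw(hw, ctlq_size);
+ */
+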
+/* Temporary definition to compile - TBD if needed */
+struct idpf_arq_event_info {
+	struct idpf_ctlq_desc desc;
+	u16 msg_len;
+	u16 buf_len;
+	u8 *msg_buf;
+};
+
+struct idpf_get_set_rss_key_data {
+	u8 standard_rss_key[0x28];
+	u8 extended_hash_key[0xc];
+};
+
+struct idpf_aq_get_phy_abilities_resp {
+	__le32 phy_type;
+};
+
+struct idpf_filter_program_desc {
+	__le32 qid;
+};
+
+#endif /* _IDPF_TYPE_H_ */
diff --git a/drivers/common/idpf/meson.build b/drivers/common/idpf/meson.build
new file mode 100644
index 0000000000..183587b51a
--- /dev/null
+++ b/drivers/common/idpf/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2022 Intel Corporation
+
+sources = files(
+        'idpf_common.c',
+        'idpf_controlq.c',
+        'idpf_controlq_setup.c',
+)
+
+cflags += ['-Wno-unused-value']
+cflags += ['-Wno-unused-variable']
+cflags += ['-Wno-unused-parameter']
+cflags += ['-Wno-implicit-fallthrough']
+cflags += ['-Wno-strict-aliasing']
diff --git a/drivers/common/idpf/siov_regs.h b/drivers/common/idpf/siov_regs.h
new file mode 100644
index 0000000000..3ac4f8f177
--- /dev/null
+++ b/drivers/common/idpf/siov_regs.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+#ifndef _SIOV_REGS_H_
+#define _SIOV_REGS_H_
+#define VDEV_MBX_START			0x20000 /* Begin at 128KB */
+#define VDEV_MBX_ATQBAL			(VDEV_MBX_START + 0x0000)
+#define VDEV_MBX_ATQBAH			(VDEV_MBX_START + 0x0004)
+#define VDEV_MBX_ATQLEN			(VDEV_MBX_START + 0x0008)
+#define VDEV_MBX_ATQH			(VDEV_MBX_START + 0x000C)
+#define VDEV_MBX_ATQT			(VDEV_MBX_START + 0x0010)
+#define VDEV_MBX_ARQBAL			(VDEV_MBX_START + 0x0014)
+#define VDEV_MBX_ARQBAH			(VDEV_MBX_START + 0x0018)
+#define VDEV_MBX_ARQLEN			(VDEV_MBX_START + 0x001C)
+#define VDEV_MBX_ARQH			(VDEV_MBX_START + 0x0020)
+#define VDEV_MBX_ARQT			(VDEV_MBX_START + 0x0024)
+#define VDEV_GET_RSTAT			0x21000 /* 132KB for RSTAT */
+
+/* Begin at offset after 1MB (after 256 4k pages) */
+#define VDEV_QRX_TAIL_START		0x100000
+#define VDEV_QRX_TAIL(_i)		(VDEV_QRX_TAIL_START + ((_i) * 0x1000)) /* 2k Rx queues */
+
+/* Begin at offset of 9MB for Rx buffer queue tail register pages */
+#define VDEV_QRX_BUFQ_TAIL_START	0x900000
+/* 2k Rx buffer queues */
+#define VDEV_QRX_BUFQ_TAIL(_i)		(VDEV_QRX_BUFQ_TAIL_START + ((_i) * 0x1000))
+
+/* Begin at offset of 17MB for 2k Tx queues */
+#define VDEV_QTX_TAIL_START		0x1100000
+#define VDEV_QTX_TAIL(_i)		(VDEV_QTX_TAIL_START + ((_i) * 0x1000)) /* 2k Tx queues */
+
+/* Begin at offset of 25MB for 2k Tx completion queues */
+#define VDEV_QTX_COMPL_TAIL_START	0x1900000
+/* 2k Tx completion queues */
+#define VDEV_QTX_COMPL_TAIL(_i)		(VDEV_QTX_COMPL_TAIL_START + ((_i) * 0x1000))
+
+#define VDEV_INT_DYN_CTL01		0x2100000 /* Begin at offset 33MB */
+
+/* Begin at offset of 33MB + 4k to accommodate CTL01 register */
+#define VDEV_INT_DYN_START		(VDEV_INT_DYN_CTL01 + 0x1000)
+#define VDEV_INT_DYN_CTL(_i)		(VDEV_INT_DYN_START + ((_i) * 0x1000))
+#define VDEV_INT_ITR_0(_i)		(VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x04)
+#define VDEV_INT_ITR_1(_i)		(VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x08)
+#define VDEV_INT_ITR_2(_i)		(VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x0C)
+
+/* Next offset to begin at 42MB (0x2A00000) */
+#endif /* _SIOV_REGS_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
new file mode 100644
index 0000000000..bfb246c752
--- /dev/null
+++ b/drivers/common/idpf/version.map
@@ -0,0 +1,12 @@
+INTERNAL {
+	global:
+
+	idpf_ctlq_deinit;
+	idpf_ctlq_init;
+	idpf_ctlq_clean_sq;
+	idpf_ctlq_recv;
+	idpf_ctlq_send;
+	idpf_ctlq_post_rx_buffs;
+
+	local: *;
+};
diff --git a/drivers/common/idpf/virtchnl.h b/drivers/common/idpf/virtchnl.h
new file mode 100644
index 0000000000..ea798e3971
--- /dev/null
+++ b/drivers/common/idpf/virtchnl.h
@@ -0,0 +1,2866 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the Virtual Function (VF) - Physical Function
+ * (PF) communication protocol used by the drivers for all devices starting
+ * from our 40G product line
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI.  Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The returned value
+ * is of virtchnl_status_code type, defined here.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */
+
+/* START GENERIC DEFINES
+ * Need to ensure the following enums and defines hold the same meaning and
+ * value in current and future projects
+ */
+
+#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS	6
+
+/* These macros are used to generate compilation errors if a structure/union
+ * is not exactly the correct length. They give a divide-by-zero error if the
+ * structure/union is not of the correct size; otherwise they create an enum
+ * that is never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+	{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \
+	{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
+
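+/* Usage sketch (illustrative only, with a hypothetical structure): for a
+ * structure that really is 8 bytes the check compiles to an unused enum,
+ * while a wrong length such as VIRTCHNL_CHECK_STRUCT_LEN(12, example_hdr)
+ * expands to (12)/0 and breaks the build.
+ *
+ *	struct example_hdr { u32 a; u32 b; };
+ *	VIRTCHNL_CHECK_STRUCT_LEN(8, example_hdr);
+ */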
+
+/* Error Codes
+ * Note that many older versions of various iAVF drivers convert the reported
+ * status code directly into an iavf_status enumeration. For this reason, it
+ * is important that the values of these enumerations line up.
+ */
+enum virtchnl_status_code {
+	VIRTCHNL_STATUS_SUCCESS				= 0,
+	VIRTCHNL_STATUS_ERR_PARAM			= -5,
+	VIRTCHNL_STATUS_ERR_NO_MEMORY			= -18,
+	VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH		= -38,
+	VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR		= -39,
+	VIRTCHNL_STATUS_ERR_INVALID_VF_ID		= -40,
+	VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR		= -53,
+	VIRTCHNL_STATUS_ERR_NOT_SUPPORTED		= -64,
+};
+
+/* Backward compatibility */
+#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
+#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
+
+#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT		0x0
+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT		0x1
+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT	0x2
+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT		0x3
+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT		0x4
+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT		0x5
+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT		0x6
+#define VIRTCHNL_LINK_SPEED_5GB_SHIFT		0x7
+
+enum virtchnl_link_speed {
+	VIRTCHNL_LINK_SPEED_UNKNOWN	= 0,
+	VIRTCHNL_LINK_SPEED_100MB	= BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
+	VIRTCHNL_LINK_SPEED_1GB		= BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
+	VIRTCHNL_LINK_SPEED_10GB	= BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
+	VIRTCHNL_LINK_SPEED_40GB	= BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
+	VIRTCHNL_LINK_SPEED_20GB	= BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
+	VIRTCHNL_LINK_SPEED_25GB	= BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+	VIRTCHNL_LINK_SPEED_2_5GB	= BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
+	VIRTCHNL_LINK_SPEED_5GB		= BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
+};
+
+/* for hsplit_0 field of Rx HMC context */
+/* deprecated with AVF 1.0 */
+enum virtchnl_rx_hsplit {
+	VIRTCHNL_RX_HSPLIT_NO_SPLIT      = 0,
+	VIRTCHNL_RX_HSPLIT_SPLIT_L2      = 1,
+	VIRTCHNL_RX_HSPLIT_SPLIT_IP      = 2,
+	VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
+	VIRTCHNL_RX_HSPLIT_SPLIT_SCTP    = 8,
+};
+
+enum virtchnl_bw_limit_type {
+	VIRTCHNL_BW_SHAPER = 0,
+};
+/* END GENERIC DEFINES */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ * Use of "advanced opcode" features must be negotiated as part of the
+ * capabilities exchange and is not considered part of the base mode feature
+ * set.
+ *
+ */
+	VIRTCHNL_OP_UNKNOWN = 0,
+	VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+	VIRTCHNL_OP_RESET_VF = 2,
+	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+	VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+	VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+	VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+	VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+	VIRTCHNL_OP_ENABLE_QUEUES = 8,
+	VIRTCHNL_OP_DISABLE_QUEUES = 9,
+	VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+	VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+	VIRTCHNL_OP_ADD_VLAN = 12,
+	VIRTCHNL_OP_DEL_VLAN = 13,
+	VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+	VIRTCHNL_OP_GET_STATS = 15,
+	VIRTCHNL_OP_RSVD = 16,
+	VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+	/* opcode 19 is reserved */
+	/* opcodes 20, 21, and 22 are reserved */
+	VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+	VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+	VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+	VIRTCHNL_OP_SET_RSS_HENA = 26,
+	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+	VIRTCHNL_OP_REQUEST_QUEUES = 29,
+	VIRTCHNL_OP_ENABLE_CHANNELS = 30,
+	VIRTCHNL_OP_DISABLE_CHANNELS = 31,
+	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
+	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
+	/* opcode 34 is reserved */
+	/* opcodes 38, 39, 40, 41, 42 and 43 are reserved */
+	/* opcode 44 is reserved */
+	VIRTCHNL_OP_ADD_RSS_CFG = 45,
+	VIRTCHNL_OP_DEL_RSS_CFG = 46,
+	VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
+	VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
+	VIRTCHNL_OP_GET_MAX_RSS_QREGION = 50,
+	VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
+	VIRTCHNL_OP_ADD_VLAN_V2 = 52,
+	VIRTCHNL_OP_DEL_VLAN_V2 = 53,
+	VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
+	VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
+	VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
+	VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
+	VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58,
+	VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59,
+	VIRTCHNL_OP_1588_PTP_GET_CAPS = 60,
+	VIRTCHNL_OP_1588_PTP_GET_TIME = 61,
+	VIRTCHNL_OP_1588_PTP_SET_TIME = 62,
+	VIRTCHNL_OP_1588_PTP_ADJ_TIME = 63,
+	VIRTCHNL_OP_1588_PTP_ADJ_FREQ = 64,
+	VIRTCHNL_OP_1588_PTP_TX_TIMESTAMP = 65,
+	VIRTCHNL_OP_GET_QOS_CAPS = 66,
+	VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP = 67,
+	VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS = 68,
+	VIRTCHNL_OP_1588_PTP_SET_PIN_CFG = 69,
+	VIRTCHNL_OP_1588_PTP_EXT_TIMESTAMP = 70,
+	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
+	VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
+	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
+	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
+	VIRTCHNL_OP_CONFIG_QUANTA = 113,
+	VIRTCHNL_OP_MAX,
+};
+
+static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
+{
+	switch (v_opcode) {
+	case VIRTCHNL_OP_UNKNOWN:
+		return "VIRTCHNL_OP_UNKNOWN";
+	case VIRTCHNL_OP_VERSION:
+		return "VIRTCHNL_OP_VERSION";
+	case VIRTCHNL_OP_RESET_VF:
+		return "VIRTCHNL_OP_RESET_VF";
+	case VIRTCHNL_OP_GET_VF_RESOURCES:
+		return "VIRTCHNL_OP_GET_VF_RESOURCES";
+	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+		return "VIRTCHNL_OP_CONFIG_TX_QUEUE";
+	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+		return "VIRTCHNL_OP_CONFIG_RX_QUEUE";
+	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+		return "VIRTCHNL_OP_CONFIG_VSI_QUEUES";
+	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		return "VIRTCHNL_OP_CONFIG_IRQ_MAP";
+	case VIRTCHNL_OP_ENABLE_QUEUES:
+		return "VIRTCHNL_OP_ENABLE_QUEUES";
+	case VIRTCHNL_OP_DISABLE_QUEUES:
+		return "VIRTCHNL_OP_DISABLE_QUEUES";
+	case VIRTCHNL_OP_ADD_ETH_ADDR:
+		return "VIRTCHNL_OP_ADD_ETH_ADDR";
+	case VIRTCHNL_OP_DEL_ETH_ADDR:
+		return "VIRTCHNL_OP_DEL_ETH_ADDR";
+	case VIRTCHNL_OP_ADD_VLAN:
+		return "VIRTCHNL_OP_ADD_VLAN";
+	case VIRTCHNL_OP_DEL_VLAN:
+		return "VIRTCHNL_OP_DEL_VLAN";
+	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+		return "VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE";
+	case VIRTCHNL_OP_GET_STATS:
+		return "VIRTCHNL_OP_GET_STATS";
+	case VIRTCHNL_OP_RSVD:
+		return "VIRTCHNL_OP_RSVD";
+	case VIRTCHNL_OP_EVENT:
+		return "VIRTCHNL_OP_EVENT";
+	case VIRTCHNL_OP_CONFIG_RSS_KEY:
+		return "VIRTCHNL_OP_CONFIG_RSS_KEY";
+	case VIRTCHNL_OP_CONFIG_RSS_LUT:
+		return "VIRTCHNL_OP_CONFIG_RSS_LUT";
+	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+		return "VIRTCHNL_OP_GET_RSS_HENA_CAPS";
+	case VIRTCHNL_OP_SET_RSS_HENA:
+		return "VIRTCHNL_OP_SET_RSS_HENA";
+	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+		return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING";
+	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+		return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING";
+	case VIRTCHNL_OP_REQUEST_QUEUES:
+		return "VIRTCHNL_OP_REQUEST_QUEUES";
+	case VIRTCHNL_OP_ENABLE_CHANNELS:
+		return "VIRTCHNL_OP_ENABLE_CHANNELS";
+	case VIRTCHNL_OP_DISABLE_CHANNELS:
+		return "VIRTCHNL_OP_DISABLE_CHANNELS";
+	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+		return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
+	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+		return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
+	case VIRTCHNL_OP_ADD_RSS_CFG:
+		return "VIRTCHNL_OP_ADD_RSS_CFG";
+	case VIRTCHNL_OP_DEL_RSS_CFG:
+		return "VIRTCHNL_OP_DEL_RSS_CFG";
+	case VIRTCHNL_OP_ADD_FDIR_FILTER:
+		return "VIRTCHNL_OP_ADD_FDIR_FILTER";
+	case VIRTCHNL_OP_DEL_FDIR_FILTER:
+		return "VIRTCHNL_OP_DEL_FDIR_FILTER";
+	case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
+		return "VIRTCHNL_OP_GET_MAX_RSS_QREGION";
+	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+		return "VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS";
+	case VIRTCHNL_OP_ADD_VLAN_V2:
+		return "VIRTCHNL_OP_ADD_VLAN_V2";
+	case VIRTCHNL_OP_DEL_VLAN_V2:
+		return "VIRTCHNL_OP_DEL_VLAN_V2";
+	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+		return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2";
+	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+		return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2";
+	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+		return "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2";
+	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+		return "VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2";
+	case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2:
+		return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2";
+	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
+		return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2";
+	case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+		return "VIRTCHNL_OP_1588_PTP_GET_CAPS";
+	case VIRTCHNL_OP_1588_PTP_GET_TIME:
+		return "VIRTCHNL_OP_1588_PTP_GET_TIME";
+	case VIRTCHNL_OP_1588_PTP_SET_TIME:
+		return "VIRTCHNL_OP_1588_PTP_SET_TIME";
+	case VIRTCHNL_OP_1588_PTP_ADJ_TIME:
+		return "VIRTCHNL_OP_1588_PTP_ADJ_TIME";
+	case VIRTCHNL_OP_1588_PTP_ADJ_FREQ:
+		return "VIRTCHNL_OP_1588_PTP_ADJ_FREQ";
+	case VIRTCHNL_OP_1588_PTP_TX_TIMESTAMP:
+		return "VIRTCHNL_OP_1588_PTP_TX_TIMESTAMP";
+	case VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS:
+		return "VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS";
+	case VIRTCHNL_OP_1588_PTP_SET_PIN_CFG:
+		return "VIRTCHNL_OP_1588_PTP_SET_PIN_CFG";
+	case VIRTCHNL_OP_1588_PTP_EXT_TIMESTAMP:
+		return "VIRTCHNL_OP_1588_PTP_EXT_TIMESTAMP";
+	case VIRTCHNL_OP_ENABLE_QUEUES_V2:
+		return "VIRTCHNL_OP_ENABLE_QUEUES_V2";
+	case VIRTCHNL_OP_DISABLE_QUEUES_V2:
+		return "VIRTCHNL_OP_DISABLE_QUEUES_V2";
+	case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
+		return "VIRTCHNL_OP_MAP_QUEUE_VECTOR";
+	case VIRTCHNL_OP_MAX:
+		return "VIRTCHNL_OP_MAX";
+	default:
+		return "Unsupported (update virtchnl.h)";
+	}
+}
+
+static inline const char *virtchnl_stat_str(enum virtchnl_status_code v_status)
+{
+	switch (v_status) {
+	case VIRTCHNL_STATUS_SUCCESS:
+		return "VIRTCHNL_STATUS_SUCCESS";
+	case VIRTCHNL_STATUS_ERR_PARAM:
+		return "VIRTCHNL_STATUS_ERR_PARAM";
+	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
+		return "VIRTCHNL_STATUS_ERR_NO_MEMORY";
+	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
+		return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH";
+	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
+		return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR";
+	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
+		return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID";
+	case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
+		return "VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR";
+	case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
+		return "VIRTCHNL_STATUS_ERR_NOT_SUPPORTED";
+	default:
+		return "Unknown status code (update virtchnl.h)";
+	}
+}
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+	u8 pad[8];			 /* AQ flags/opcode/len/retval fields */
+
+	/* avoid confusion with desc->opcode */
+	enum virtchnl_ops v_opcode;
+
+	/* ditto for desc->retval */
+	enum virtchnl_status_code v_retval;
+	u32 vfid;			 /* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
+
+/* Message descriptions and data structures. */
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR		1
+#define VIRTCHNL_VERSION_MINOR		1
+#define VIRTCHNL_VERSION_MAJOR_2	2
+#define VIRTCHNL_VERSION_MINOR_0	0
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0
+
+struct virtchnl_version_info {
+	u32 major;
+	u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_ver) (((_ver)->major == 1) && ((_ver)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
+#define VF_IS_V20(_ver) (((_ver)->major == 2) && ((_ver)->minor == 0))
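+
+/* Usage sketch (illustrative only, hypothetical VF-side check) following the
+ * rules above: a major mismatch means the VF cannot operate, while a minor
+ * mismatch only warrants a warning.
+ *
+ *	if (pf_ver.major != VIRTCHNL_VERSION_MAJOR)
+ *		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ *	if (pf_ver.minor != VIRTCHNL_VERSION_MINOR)
+ *		log_minor_version_warning();	(hypothetical helper)
+ */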
+
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
+ * vsi_type should always be 6 for backward compatibility. Add other fields
+ * as needed.
+ */
+enum virtchnl_vsi_type {
+	VIRTCHNL_VSI_TYPE_INVALID = 0,
+	VIRTCHNL_VSI_SRIOV = 6,
+};
+
+/* VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * virtchnl_vf_resource and one or more
+ * virtchnl_vsi_resource structures.
+ */
+
+struct virtchnl_vsi_resource {
+	u16 vsi_id;
+	u16 num_queue_pairs;
+
+	/* see enum virtchnl_vsi_type */
+	s32 vsi_type;
+	u16 qset_handle;
+	u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+
+/* VF capability flags
+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.
+ */
+#define VIRTCHNL_VF_OFFLOAD_L2			BIT(0)
+#define VIRTCHNL_VF_OFFLOAD_IWARP		BIT(1)
+#define VIRTCHNL_VF_CAP_RDMA			VIRTCHNL_VF_OFFLOAD_IWARP
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ		BIT(3)
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG		BIT(4)
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR		BIT(5)
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES		BIT(6)
+/* used to negotiate communicating link speeds in Mbps */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		BIT(7)
+	/* BIT(8) is reserved */
+#define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
+#define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
+#define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
+#define VIRTCHNL_VF_OFFLOAD_VLAN		BIT(16)
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING		BIT(17)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2	BIT(18)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF		BIT(19)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP		BIT(20)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		BIT(21)
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	BIT(22)
+#define VIRTCHNL_VF_OFFLOAD_ADQ			BIT(23)
+#define VIRTCHNL_VF_OFFLOAD_ADQ_V2		BIT(24)
+#define VIRTCHNL_VF_OFFLOAD_USO			BIT(25)
+	/* BIT(26) is reserved */
+#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF		BIT(27)
+#define VIRTCHNL_VF_OFFLOAD_FDIR_PF		BIT(28)
+#define VIRTCHNL_VF_OFFLOAD_QOS			BIT(29)
+	/* BIT(30) is reserved */
+#define VIRTCHNL_VF_CAP_PTP			BIT(31)
+
+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
+			       VIRTCHNL_VF_OFFLOAD_VLAN | \
+			       VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
+struct virtchnl_vf_resource {
+	u16 num_vsis;
+	u16 num_queue_pairs;
+	u16 max_vectors;
+	u16 max_mtu;
+
+	u32 vf_cap_flags;
+	u32 rss_key_size;
+	u32 rss_lut_size;
+
+	struct virtchnl_vsi_resource vsi_res[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
+
+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct virtchnl_txq_info {
+	u16 vsi_id;
+	u16 queue_id;
+	u16 ring_len;		/* number of descriptors, multiple of 8 */
+	u16 headwb_enabled; /* deprecated with AVF 1.0 */
+	u64 dma_ring_addr;
+	u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+
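+/* Usage sketch (illustrative only, hypothetical values): the VF fills one
+ * virtchnl_txq_info per TX queue and sends it as the external buffer of a
+ * VIRTCHNL_OP_CONFIG_TX_QUEUE message.
+ *
+ *	struct virtchnl_txq_info txq = {0};
+ *
+ *	txq.vsi_id = vsi_id;
+ *	txq.queue_id = 0;
+ *	txq.ring_len = 512;		(multiple of 8)
+ *	txq.dma_ring_addr = ring_iova;
+ */
+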
+/* RX descriptor IDs (range from 0 to 63) */
+enum virtchnl_rx_desc_ids {
+	VIRTCHNL_RXDID_0_16B_BASE		= 0,
+	VIRTCHNL_RXDID_1_32B_BASE		= 1,
+	VIRTCHNL_RXDID_2_FLEX_SQ_NIC		= 2,
+	VIRTCHNL_RXDID_3_FLEX_SQ_SW		= 3,
+	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB	= 4,
+	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL	= 5,
+	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2		= 6,
+	VIRTCHNL_RXDID_7_HW_RSVD		= 7,
+	/* 8 through 15 are reserved */
+	VIRTCHNL_RXDID_16_COMMS_GENERIC		= 16,
+	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN	= 17,
+	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4	= 18,
+	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6	= 19,
+	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW	= 20,
+	VIRTCHNL_RXDID_21_COMMS_AUX_TCP		= 21,
+	/* 22 through 63 are reserved */
+};
+
+/* RX descriptor ID bitmasks */
+enum virtchnl_rx_desc_id_bitmasks {
+	VIRTCHNL_RXDID_0_16B_BASE_M		= BIT(VIRTCHNL_RXDID_0_16B_BASE),
+	VIRTCHNL_RXDID_1_32B_BASE_M		= BIT(VIRTCHNL_RXDID_1_32B_BASE),
+	VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M		= BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC),
+	VIRTCHNL_RXDID_3_FLEX_SQ_SW_M		= BIT(VIRTCHNL_RXDID_3_FLEX_SQ_SW),
+	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M	= BIT(VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB),
+	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL_M	= BIT(VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL),
+	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2_M	= BIT(VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2),
+	VIRTCHNL_RXDID_7_HW_RSVD_M		= BIT(VIRTCHNL_RXDID_7_HW_RSVD),
+	/* 8 through 15 are reserved */
+	VIRTCHNL_RXDID_16_COMMS_GENERIC_M	= BIT(VIRTCHNL_RXDID_16_COMMS_GENERIC),
+	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN_M	= BIT(VIRTCHNL_RXDID_17_COMMS_AUX_VLAN),
+	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4_M	= BIT(VIRTCHNL_RXDID_18_COMMS_AUX_IPV4),
+	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6_M	= BIT(VIRTCHNL_RXDID_19_COMMS_AUX_IPV6),
+	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW_M	= BIT(VIRTCHNL_RXDID_20_COMMS_AUX_FLOW),
+	VIRTCHNL_RXDID_21_COMMS_AUX_TCP_M	= BIT(VIRTCHNL_RXDID_21_COMMS_AUX_TCP),
+	/* 22 through 63 are reserved */
+};
+
+/* virtchnl_rxq_info_flags
+ *
+ * Definition of bits in the flags field of the virtchnl_rxq_info structure.
+ */
+enum virtchnl_rxq_info_flags {
+	/* If the VIRTCHNL_PTP_RX_TSTAMP bit of the flag field is set, this is
+	 * a request to enable Rx timestamp. Other flag bits are currently
+	 * reserved and they may be extended in the future.
+	 */
+	VIRTCHNL_PTP_RX_TSTAMP = BIT(0),
+};
+
+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code. The
+ * crc_disable flag disables CRC stripping on the VF. Setting
+ * the crc_disable flag to 1 will disable CRC stripping for each
+ * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
+ * offload must have been set prior to sending this info or the PF
+ * will ignore the request. This flag should be set the same for
+ * all of the queues for a VF.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+	u16 vsi_id;
+	u16 queue_id;
+	u32 ring_len;		/* number of descriptors, multiple of 32 */
+	u16 hdr_size;
+	u16 splithdr_enabled; /* deprecated with AVF 1.0 */
+	u32 databuffer_size;
+	u32 max_pkt_size;
+	u8 crc_disable;
+	u8 pad1[3];
+	u64 dma_ring_addr;
+
+	/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
+	s32 rx_split_pos;
+	u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ * NOTE: The VF is not required to configure all queues in a single request.
+ * It may send multiple messages. PF drivers must correctly handle all VF
+ * requests.
+ */
+struct virtchnl_queue_pair_info {
+	/* NOTE: vsi_id and queue_id should be identical for both queues. */
+	struct virtchnl_txq_info txq;
+	struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+	u16 vsi_id;
+	u16 num_queue_pairs;
+	u32 pad;
+	struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
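+
+/* Illustrative sketch (not part of the interface definition): for a single
+ * queue pair, one instance of the structure above is sufficient. The values
+ * vsi_id, tx_ring_dma and rx_ring_dma below are hypothetical and owned by
+ * the VF driver:
+ *
+ *	struct virtchnl_vsi_queue_config_info vqci = {0};
+ *
+ *	vqci.vsi_id = vsi_id;
+ *	vqci.num_queue_pairs = 1;
+ *	vqci.qpair[0].txq.vsi_id = vsi_id;
+ *	vqci.qpair[0].txq.queue_id = 0;
+ *	vqci.qpair[0].txq.ring_len = 512;
+ *	vqci.qpair[0].txq.dma_ring_addr = tx_ring_dma;
+ *	vqci.qpair[0].rxq.vsi_id = vsi_id;
+ *	vqci.qpair[0].rxq.queue_id = 0;
+ *	vqci.qpair[0].rxq.ring_len = 512;
+ *	vqci.qpair[0].rxq.databuffer_size = 2048;
+ *	vqci.qpair[0].rxq.max_pkt_size = 1522;
+ *	vqci.qpair[0].rxq.dma_ring_addr = rx_ring_dma;
+ *	(send VIRTCHNL_OP_CONFIG_VSI_QUEUES with &vqci, sizeof(vqci))
+ *
+ * For more queue pairs the message is sized with additional
+ * virtchnl_queue_pair_info elements appended to qpair[].
+ */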
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF.  Each VF gets a guaranteed number of queues on init but asking for
+ * additional queues must be negotiated.  This is a best effort request as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support.  If the request is successful, PF will
+ * then reset the VF to institute required changes.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+	u16 num_queue_pairs;
+};
+
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0. The VF may not request
+ * that vector 0 be used for traffic.
+ * PF configures interrupt mapping and returns status.
+ * NOTE: due to hardware requirements, all active queues (both TX and RX)
+ * should be mapped to interrupts, even if the driver intends to operate
+ * only in polling mode. In this case the interrupt may be disabled, but
+ * the ITR timer will still run to trigger writebacks.
+ */
+struct virtchnl_vector_map {
+	u16 vsi_id;
+	u16 vector_id;
+	u16 rxq_map;
+	u16 txq_map;
+	u16 rxitr_idx;
+	u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+	u16 num_vectors;
+	struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ * NOTE: The VF is not required to enable/disable all queues in a single
+ * request. It may send multiple messages.
+ * PF drivers must correctly handle all VF requests.
+ */
+struct virtchnl_queue_select {
+	u16 vsi_id;
+	u16 pad;
+	u32 rx_queues;
+	u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
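+
+/* Illustrative sketch (not part of the interface definition): to enable the
+ * first four queue pairs of a VSI, a VF driver could fill the bitmaps as
+ * follows, where vsi_id is a hypothetical value owned by the driver:
+ *
+ *	struct virtchnl_queue_select qs = {0};
+ *
+ *	qs.vsi_id = vsi_id;
+ *	qs.rx_queues = 0xF;
+ *	qs.tx_queues = 0xF;
+ *	(send VIRTCHNL_OP_ENABLE_QUEUES with &qs, sizeof(qs))
+ */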
+
+/* VIRTCHNL_OP_GET_MAX_RSS_QREGION
+ *
+ * if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
+ * then this op must be supported.
+ *
+ * VF sends this message in order to query the max RSS queue region
+ * size supported by PF, when VIRTCHNL_VF_LARGE_NUM_QPAIRS is enabled.
+ * This information should be used when configuring the RSS LUT and/or
+ * configuring queue region based filters.
+ *
+ * The maximum RSS queue region is 2^qregion_width. So, a qregion_width
+ * of 6 would inform the VF that the PF supports a maximum RSS queue region
+ * of 64.
+ *
+ * A queue region represents a range of queues that can be used to configure
+ * a RSS LUT. For example, if a VF is given 64 queues, but only a max queue
+ * region size of 16 (i.e. 2^qregion_width = 16) then it will only be able
+ * to configure the RSS LUT with queue indices from 0 to 15. However, other
+ * filters can be used to direct packets to queues >15 via specifying a queue
+ * base/offset and queue region width.
+ */
+struct virtchnl_max_rss_qregion {
+	u16 vport_id;
+	u16 qregion_width;
+	u8 pad[4];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_max_rss_qregion);
+
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_LEGACY
+ * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
+ * bytes. Moving forward all VF drivers should not set type to
+ * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
+ * behavior. The control plane function (i.e. PF) can use a best effort method
+ * of tracking the primary/device unicast in this case, but there is no
+ * guarantee and functionality depends on the implementation of the PF.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_PRIMARY
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
+ * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
+ * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
+ * function (i.e. PF) to accurately track and use this MAC address for
+ * displaying on the host and for VM/function reset.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_EXTRA
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
+ * unicast and/or multicast filters that are being added/deleted via
+ * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
+ */
+struct virtchnl_ether_addr {
+	u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+	u8 type;
+#define VIRTCHNL_ETHER_ADDR_LEGACY	0
+#define VIRTCHNL_ETHER_ADDR_PRIMARY	1
+#define VIRTCHNL_ETHER_ADDR_EXTRA	2
+#define VIRTCHNL_ETHER_ADDR_TYPE_MASK	3 /* first two bits of type are valid */
+	u8 pad;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+	u16 vsi_id;
+	u16 num_elements;
+	struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
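+
+/* Illustrative sketch (not part of the interface definition): adding the
+ * primary unicast MAC address could look roughly like this, where vsi_id and
+ * mac_addr (a 6-byte array) are hypothetical values owned by the VF driver:
+ *
+ *	struct virtchnl_ether_addr_list list = {0};
+ *
+ *	list.vsi_id = vsi_id;
+ *	list.num_elements = 1;
+ *	memcpy(list.list[0].addr, mac_addr, sizeof(list.list[0].addr));
+ *	list.list[0].type = VIRTCHNL_ETHER_ADDR_PRIMARY;
+ *	(send VIRTCHNL_OP_ADD_ETH_ADDR with &list, sizeof(list))
+ */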
+
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+	u16 vsi_id;
+	u16 num_elements;
+	u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
+ * structures and opcodes.
+ *
+ * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
+ * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
+ * by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
+ * would OR the following bits:
+ *
+ *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * The VF would interpret this as VLAN filtering can be supported on both 0x8100
+ * and 0x88A8 VLAN ethertypes.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be
+ * supported by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
+ * offload it would OR the following bits:
+ *
+ *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * The VF would interpret this as VLAN stripping can be supported on either
+ * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
+ * the previously set value.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
+ * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
+ * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
+ * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
+ *
+ * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
+ * VLAN filtering if the underlying PF supports it.
+ *
+ * VIRTCHNL_VLAN_TOGGLE_ALLOWED - This field is used to say whether a
+ * certain VLAN capability can be toggled. For example if the underlying PF/CP
+ * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
+ * set this bit along with the supported ethertypes.
+ */
+enum virtchnl_vlan_support {
+	VIRTCHNL_VLAN_UNSUPPORTED =		0,
+	VIRTCHNL_VLAN_ETHERTYPE_8100 =		0x00000001,
+	VIRTCHNL_VLAN_ETHERTYPE_88A8 =		0x00000002,
+	VIRTCHNL_VLAN_ETHERTYPE_9100 =		0x00000004,
+	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 =	0x00000100,
+	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 =	0x00000200,
+	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 =	0x00000400,
+	VIRTCHNL_VLAN_PRIO =			0x01000000,
+	VIRTCHNL_VLAN_FILTER_MASK =		0x10000000,
+	VIRTCHNL_VLAN_ETHERTYPE_AND =		0x20000000,
+	VIRTCHNL_VLAN_ETHERTYPE_XOR =		0x40000000,
+	VIRTCHNL_VLAN_TOGGLE =			0x80000000
+};
+
+/* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * for filtering, insertion, and stripping capabilities.
+ *
+ * If only outer capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective.
+ *
+ * If only inner capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective. Functionally this is the same as if only outer capabilities are
+ * supported. The VF driver is just forced to use the inner fields when
+ * adding/deleting filters and enabling/disabling offloads (if supported).
+ *
+ * If both outer and inner capabilities are supported (for filtering, insertion,
+ * and/or stripping) then outer refers to the outer most or single VLAN and
+ * inner refers to the second VLAN, if it exists, in the packet.
+ *
+ * There is no support for tunneled VLAN offloads, so outer or inner are never
+ * referring to a tunneled packet from the VF's perspective.
+ */
+struct virtchnl_vlan_supported_caps {
+	u32 outer;
+	u32 inner;
+};
+
+/* The PF populates these fields based on the supported VLAN filtering. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
+ * the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN filtering setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The ethertype(s) specified in the ethertype_init field are the ethertypes
+ * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
+ * most VLAN from the VF's perspective. If both inner and outer filtering are
+ * allowed then ethertype_init only refers to the outer most VLAN, as the only
+ * VLAN ethertype supported for inner VLAN filtering is
+ * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
+ * when both inner and outer filtering are allowed.
+ *
+ * The max_filters field tells the VF how many VLAN filters it's allowed to have
+ * at any one time. If it exceeds this amount and tries to add another filter,
+ * then the request will be rejected by the PF. To prevent failures, the VF
+ * should keep track of how many VLAN filters it has added and not attempt to
+ * add more than max_filters.
+ */
+struct virtchnl_vlan_filtering_caps {
+	struct virtchnl_vlan_supported_caps filtering_support;
+	u32 ethertype_init;
+	u16 max_filters;
+	u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
+
+/* This enum is used for the virtchnl_vlan_offload_caps structure to specify
+ * if the PF supports a different ethertype for stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
+ * for stripping affect the ethertype(s) specified for insertion and vice versa
+ * as well. If the VF tries to configure VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
+ * that will be the ethertype for both stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
+ * stripping do not affect the ethertype(s) specified for insertion and vice
+ * versa.
+ */
+enum virtchnl_vlan_ethertype_match {
+	VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
+	VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
+};
+
+/* The PF populates these fields based on the supported VLAN offloads. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN offload setting if the
+ * VIRTCHNL_VLAN_TOGGLE_ALLOWED bit is set.
+ *
+ * The VF driver needs to be aware of how the tags are stripped by hardware and
+ * inserted by the VF driver based on the level of offload support. The PF will
+ * populate these fields based on where the VLAN tags are expected to be
+ * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
+ * interpret these fields. See the definition of the
+ * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
+ * enumeration.
+ */
+struct virtchnl_vlan_offload_caps {
+	struct virtchnl_vlan_supported_caps stripping_support;
+	struct virtchnl_vlan_supported_caps insertion_support;
+	u32 ethertype_init;
+	u8 ethertype_match;
+	u8 pad[3];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
+
+/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * VF sends this message to determine its VLAN capabilities.
+ *
+ * PF will mark which capabilities it supports based on hardware support and
+ * current configuration. For example, if a port VLAN is configured the PF will
+ * not allow outer VLAN filtering, stripping, or insertion to be configured so
+ * it will block these features from the VF.
+ *
+ * The VF will need to cross reference its capabilities with the PF's
+ * capabilities in the response message from the PF to determine the VLAN
+ * support.
+ */
+struct virtchnl_vlan_caps {
+	struct virtchnl_vlan_filtering_caps filtering;
+	struct virtchnl_vlan_offload_caps offloads;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
+
+struct virtchnl_vlan {
+	u16 tci;	/* tci[15:13] = PCP and tci[11:0] = VID */
+	u16 tci_mask;	/* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
+			 * filtering caps
+			 */
+	u16 tpid;	/* 0x8100, 0x88a8, etc. and only type(s) set in
+			 * filtering caps. Note that tpid here does not refer to
+			 * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
+			 * actual 2-byte VLAN TPID
+			 */
+	u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
+
+struct virtchnl_vlan_filter {
+	struct virtchnl_vlan inner;
+	struct virtchnl_vlan outer;
+	u8 pad[16];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
+
+/* VIRTCHNL_OP_ADD_VLAN_V2
+ * VIRTCHNL_OP_DEL_VLAN_V2
+ *
+ * VF sends these messages to add/del one or more VLAN tag filters for Rx
+ * traffic.
+ *
+ * The PF attempts to add the filters and returns status.
+ *
+ * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
+ * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
+ */
+struct virtchnl_vlan_filter_list_v2 {
+	u16 vport_id;
+	u16 num_elements;
+	u8 pad[4];
+	struct virtchnl_vlan_filter filters[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_filter_list_v2);
+
+/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
+ *
+ * VF sends this message to enable or disable VLAN stripping or insertion. It
+ * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
+ * allowed and whether or not it's allowed to enable/disable the specific
+ * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
+ * parse the virtchnl_vlan_caps.offloads fields to determine which offload
+ * messages are allowed.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable and/or disable 0x8100 inner
+ * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
+ * case means the outer most or single VLAN from the VF's perspective. This is
+ * because no outer offloads are supported. See the comments above the
+ * virtchnl_vlan_supported_caps structure for more details.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.inner =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.inner =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * In order to enable inner (again note that in this case inner is the outer
+ * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
+ * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
+ * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.inner_ethertype_setting =
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * The reason that VLAN TPID(s) are not being used for the
+ * outer_ethertype_setting and inner_ethertype_setting fields is because it's
+ * possible a device could support VLAN insertion and/or stripping offload on
+ * multiple ethertypes concurrently, so this method allows a VF to request
+ * multiple ethertypes in one message using the virtchnl_vlan_support
+ * enumeration.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
+ * VLAN insertion and stripping simultaneously. The
+ * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
+ * populated based on what the PF can support.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
+ * would populate the virtchnl_vlan_setting structure in the following manner
+ * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting =
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * There is also the case where a PF and the underlying hardware can support
+ * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
+ * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
+ * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
+ * offloads. The ethertypes must match for stripping and insertion.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.ethertype_match =
+ *			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ *
+ * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
+ * populate the virtchnl_vlan_setting structure in the following manner and send
+ * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
+ * ethertype for VLAN insertion if it's enabled. So, for completeness, a
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2
+ *
+ * VF sends this message to enable or disable VLAN filtering. It also needs to
+ * specify an ethertype. The VF knows which VLAN ethertypes are allowed and
+ * whether or not it's allowed to enable/disable filtering via the
+ * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
+ * parse the virtchnl_vlan_caps.filtering fields to determine which, if any,
+ * filtering messages are allowed.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.filtering in the
+ * following manner the VF will be allowed to enable/disable 0x8100 and 0x88a8
+ * outer VLAN filtering together. Note that VIRTCHNL_VLAN_ETHERTYPE_AND
+ * means that all filtering ethertypes will be enabled and disabled together
+ * regardless of the request from the VF. This means that the underlying
+ * hardware only supports VLAN filtering for all of the specified ethertypes
+ * or none of them.
+ *
+ * virtchnl_vlan_caps.filtering.filtering_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * In order to enable outer VLAN filtering for 0x88a8 and 0x8100 VLANs (0x9100
+ * VLANs aren't supported by the VF driver), the VF would populate the
+ * virtchnl_vlan_setting structure in the following manner and send the
+ * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2. The same message format would be used
+ * to disable outer VLAN filtering for 0x88a8 and 0x8100 VLANs, but the
+ * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 opcode is used.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting =
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ */
+struct virtchnl_vlan_setting {
+	u32 outer_ethertype_setting;
+	u32 inner_ethertype_setting;
+	u16 vport_id;
+	u8 pad[6];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+	u16 vsi_id;
+	u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC	0x00000001
+#define FLAG_VF_MULTICAST_PROMISC	0x00000002
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct virtchnl_eth_stats in an external buffer.
+ */
+
+struct virtchnl_eth_stats {
+	u64 rx_bytes;			/* received bytes */
+	u64 rx_unicast;			/* received unicast pkts */
+	u64 rx_multicast;		/* received multicast pkts */
+	u64 rx_broadcast;		/* received broadcast pkts */
+	u64 rx_discards;
+	u64 rx_unknown_protocol;
+	u64 tx_bytes;			/* transmitted bytes */
+	u64 tx_unicast;			/* transmitted unicast pkts */
+	u64 tx_multicast;		/* transmitted multicast pkts */
+	u64 tx_broadcast;		/* transmitted broadcast pkts */
+	u64 tx_discards;
+	u64 tx_errors;
+};
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key {
+	u16 vsi_id;
+	u16 key_len;
+	u8 key[1];         /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+	u16 vsi_id;
+	u16 lut_entries;
+	u8 lut[1];        /* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
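+
+/* Illustrative sizing note (not mandated by this header): both structures
+ * above end in a one-byte flexible array, so the message length is sized as,
+ * e.g. for the key,
+ *
+ *	sizeof(struct virtchnl_rss_key) + key_len - 1
+ *
+ * where key_len matches rss_key_size from virtchnl_vf_resource (and likewise
+ * lut_entries matches rss_lut_size for the LUT message).
+ */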
+
+/* enum virthcnl_hash_filter
+ *
+ * Bits defining the hash filters in the hena field of the virtchnl_rss_hena
+ * structure. Each bit indicates a specific hash filter for RSS.
+ *
+ * Note that not all bits are supported on all hardware. The VF should use
+ * VIRTCHNL_OP_GET_RSS_HENA_CAPS to determine which bits the PF is capable of
+ * before using VIRTCHNL_OP_SET_RSS_HENA to enable specific filters.
+ */
+enum virtchnl_hash_filter {
+	/* Bits 0 through 28 are reserved for future use */
+	/* Bits 29, 30, and 32 are not supported on XL710 and X710 */
+	VIRTCHNL_HASH_FILTER_UNICAST_IPV4_UDP		= 29,
+	VIRTCHNL_HASH_FILTER_MULTICAST_IPV4_UDP		= 30,
+	VIRTCHNL_HASH_FILTER_IPV4_UDP			= 31,
+	VIRTCHNL_HASH_FILTER_IPV4_TCP_SYN_NO_ACK	= 32,
+	VIRTCHNL_HASH_FILTER_IPV4_TCP			= 33,
+	VIRTCHNL_HASH_FILTER_IPV4_SCTP			= 34,
+	VIRTCHNL_HASH_FILTER_IPV4_OTHER			= 35,
+	VIRTCHNL_HASH_FILTER_FRAG_IPV4			= 36,
+	/* Bits 37 and 38 are reserved for future use */
+	/* Bits 39, 40, and 42 are not supported on XL710 and X710 */
+	VIRTCHNL_HASH_FILTER_UNICAST_IPV6_UDP		= 39,
+	VIRTCHNL_HASH_FILTER_MULTICAST_IPV6_UDP		= 40,
+	VIRTCHNL_HASH_FILTER_IPV6_UDP			= 41,
+	VIRTCHNL_HASH_FILTER_IPV6_TCP_SYN_NO_ACK	= 42,
+	VIRTCHNL_HASH_FILTER_IPV6_TCP			= 43,
+	VIRTCHNL_HASH_FILTER_IPV6_SCTP			= 44,
+	VIRTCHNL_HASH_FILTER_IPV6_OTHER			= 45,
+	VIRTCHNL_HASH_FILTER_FRAG_IPV6			= 46,
+	/* Bit 47 is reserved for future use */
+	VIRTCHNL_HASH_FILTER_FCOE_OX			= 48,
+	VIRTCHNL_HASH_FILTER_FCOE_RX			= 49,
+	VIRTCHNL_HASH_FILTER_FCOE_OTHER			= 50,
+	/* Bits 51 through 62 are reserved for future use */
+	VIRTCHNL_HASH_FILTER_L2_PAYLOAD			= 63,
+};
+
+#define VIRTCHNL_HASH_FILTER_INVALID	(0)
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+	/* see enum virtchnl_hash_filter */
+	u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
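+
+/* Illustrative sketch (not part of the interface definition): to hash only
+ * IPv4 TCP and UDP flows, a VF driver could request the following, after
+ * first checking VIRTCHNL_OP_GET_RSS_HENA_CAPS for support of those bits:
+ *
+ *	struct virtchnl_rss_hena hena = {0};
+ *
+ *	hena.hena = (1ULL << VIRTCHNL_HASH_FILTER_IPV4_TCP) |
+ *		    (1ULL << VIRTCHNL_HASH_FILTER_IPV4_UDP);
+ *	(send VIRTCHNL_OP_SET_RSS_HENA with &hena, sizeof(hena))
+ */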
+
+/* Type of RSS algorithm */
+enum virtchnl_rss_algorithm {
+	VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC	= 0,
+	VIRTCHNL_RSS_ALG_R_ASYMMETRIC		= 1,
+	VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC	= 2,
+	VIRTCHNL_RSS_ALG_XOR_SYMMETRIC		= 3,
+};
+
+/* This is used by PF driver to enforce how many channels can be supported.
+ * When the ADQ_V2 capability is negotiated, it will allow 16 channels;
+ * otherwise the PF driver will allow only a maximum of 4 channels.
+ */
+#define VIRTCHNL_MAX_ADQ_CHANNELS 4
+#define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16
+
+/* VIRTCHNL_OP_ENABLE_CHANNELS
+ * VIRTCHNL_OP_DISABLE_CHANNELS
+ * VF sends these messages to enable or disable channels based on
+ * the user specified queue count and queue offset for each traffic class.
+ * This struct encompasses all the information that the PF needs from
+ * VF to create a channel.
+ */
+struct virtchnl_channel_info {
+	u16 count; /* number of queues in a channel */
+	u16 offset; /* queues in a channel start from 'offset' */
+	u32 pad;
+	u64 max_tx_rate;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
+
+struct virtchnl_tc_info {
+	u32	num_tc;
+	u32	pad;
+	struct	virtchnl_channel_info list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
+
+/* VIRTCHNL_ADD_CLOUD_FILTER
+ * VIRTCHNL_DEL_CLOUD_FILTER
+ * VF sends these messages to add or delete a cloud filter based on the
+ * user specified match and action filters. These structures encompass
+ * all the information that the PF needs from the VF to add/delete a
+ * cloud filter.
+ */
+
+struct virtchnl_l4_spec {
+	u8	src_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+	u8	dst_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+	/* vlan_prio is part of this 16 bit field even from the OS perspective:
+	 * bits 11..0 carry the actual vlan_id and bits 14..12 carry vlan_prio.
+	 * In the future, if vlan_prio offload is added, that information will
+	 * be passed as part of the "vlan_id" field, bits 14..12.
+	 */
+	__be16	vlan_id;
+	__be16	pad; /* reserved for future use */
+	__be32	src_ip[4];
+	__be32	dst_ip[4];
+	__be16	src_port;
+	__be16	dst_port;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
+
+union virtchnl_flow_spec {
+	struct	virtchnl_l4_spec tcp_spec;
+	u8	buffer[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
+
+enum virtchnl_action {
+	/* action types */
+	VIRTCHNL_ACTION_DROP = 0,
+	VIRTCHNL_ACTION_TC_REDIRECT,
+	VIRTCHNL_ACTION_PASSTHRU,
+	VIRTCHNL_ACTION_QUEUE,
+	VIRTCHNL_ACTION_Q_REGION,
+	VIRTCHNL_ACTION_MARK,
+	VIRTCHNL_ACTION_COUNT,
+};
+
+enum virtchnl_flow_type {
+	/* flow types */
+	VIRTCHNL_TCP_V4_FLOW = 0,
+	VIRTCHNL_TCP_V6_FLOW,
+	VIRTCHNL_UDP_V4_FLOW,
+	VIRTCHNL_UDP_V6_FLOW,
+};
+
+struct virtchnl_filter {
+	union	virtchnl_flow_spec data;
+	union	virtchnl_flow_spec mask;
+
+	/* see enum virtchnl_flow_type */
+	s32	flow_type;
+
+	/* see enum virtchnl_action */
+	s32	action;
+	u32	action_meta;
+	u8	field_flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
+
+struct virtchnl_shaper_bw {
+	/* Unit is Kbps */
+	u32 committed;
+	u32 peak;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);
+
+
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+	VIRTCHNL_EVENT_UNKNOWN = 0,
+	VIRTCHNL_EVENT_LINK_CHANGE,
+	VIRTCHNL_EVENT_RESET_IMPENDING,
+	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+
+#define PF_EVENT_SEVERITY_INFO		0
+#define PF_EVENT_SEVERITY_ATTENTION	1
+#define PF_EVENT_SEVERITY_ACTION_REQUIRED	2
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM	255
+
+struct virtchnl_pf_event {
+	/* see enum virtchnl_event_codes */
+	s32 event;
+	union {
+		/* If the PF driver does not support the new speed reporting
+		 * capabilities then use link_event else use link_event_adv to
+		 * get the speed and link information. The ability to understand
+		 * new speeds is indicated by setting the capability flag
+		 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
+		 * in virtchnl_vf_resource struct and can be used to determine
+		 * which link event struct to use below.
+		 */
+		struct {
+			enum virtchnl_link_speed link_speed;
+			bool link_status;
+			u8 pad[3];
+		} link_event;
+		struct {
+			/* link_speed provided in Mbps */
+			u32 link_speed;
+			u8 link_status;
+			u8 pad[3];
+		} link_event_adv;
+	} event_data;
+
+	s32 severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
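+
+/* Illustrative sketch (not part of the interface definition): on a
+ * VIRTCHNL_EVENT_LINK_CHANGE event, a VF driver typically selects the union
+ * member based on the previously negotiated capability. Here pfe is a
+ * hypothetical pointer to the received virtchnl_pf_event and vf_res points
+ * to the negotiated virtchnl_vf_resource:
+ *
+ *	if (vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+ *		link_speed_mbps = pfe->event_data.link_event_adv.link_speed;
+ *	else
+ *		link_speed_enum = pfe->event_data.link_event.link_speed;
+ */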
+
+
+/* VF reset states - these are written into the RSTAT register:
+ * VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum virtchnl_vfr_states {
+	VIRTCHNL_VFR_INPROGRESS = 0,
+	VIRTCHNL_VFR_COMPLETED,
+	VIRTCHNL_VFR_VFACTIVE,
+};
+
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
+#define VIRTCHNL_MAX_SIZE_RAW_PACKET	1024
+#define PROTO_HDR_SHIFT			5
+#define PROTO_HDR_FIELD_START(proto_hdr_type) \
+					(proto_hdr_type << PROTO_HDR_SHIFT)
+#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
+
+/* VF uses these macros to configure each protocol header.
+ * Specify which protocol headers and protocol header fields based on
+ * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
+ * @param hdr: a struct of virtchnl_proto_hdr
+ * @param hdr_type: ETH/IPV4/TCP, etc
+ * @param field: SRC/DST/TEID/SPI, etc
+ */
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
+	((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
+	((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
+	((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr)	((hdr)->field_selector)
+
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+	(VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
+		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+	(VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
+		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+
+#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
+	((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
+#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
+	(((hdr)->type) >> PROTO_HDR_SHIFT)
+#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
+	((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
+#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
+	(VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) && \
+	 VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val))
+
+/* Protocol header type within a packet segment. A segment consists of one or
+ * more protocol headers that make up a logical group of protocol headers. Each
+ * logical group of protocol headers encapsulates or is encapsulated using/by
+ * tunneling or encapsulation protocols for network virtualization.
+ */
+enum virtchnl_proto_hdr_type {
+	VIRTCHNL_PROTO_HDR_NONE,
+	VIRTCHNL_PROTO_HDR_ETH,
+	VIRTCHNL_PROTO_HDR_S_VLAN,
+	VIRTCHNL_PROTO_HDR_C_VLAN,
+	VIRTCHNL_PROTO_HDR_IPV4,
+	VIRTCHNL_PROTO_HDR_IPV6,
+	VIRTCHNL_PROTO_HDR_TCP,
+	VIRTCHNL_PROTO_HDR_UDP,
+	VIRTCHNL_PROTO_HDR_SCTP,
+	VIRTCHNL_PROTO_HDR_GTPU_IP,
+	VIRTCHNL_PROTO_HDR_GTPU_EH,
+	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+	VIRTCHNL_PROTO_HDR_PPPOE,
+	VIRTCHNL_PROTO_HDR_L2TPV3,
+	VIRTCHNL_PROTO_HDR_ESP,
+	VIRTCHNL_PROTO_HDR_AH,
+	VIRTCHNL_PROTO_HDR_PFCP,
+	VIRTCHNL_PROTO_HDR_GTPC,
+	VIRTCHNL_PROTO_HDR_ECPRI,
+	VIRTCHNL_PROTO_HDR_L2TPV2,
+	VIRTCHNL_PROTO_HDR_PPP,
+	/* IPv4 and IPv6 Fragment header types are only associated with
+	 * VIRTCHNL_PROTO_HDR_IPV4 and VIRTCHNL_PROTO_HDR_IPV6 respectively,
+	 * and cannot be used independently.
+	 */
+	VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
+	VIRTCHNL_PROTO_HDR_GRE,
+};
+
+/* Protocol header field within a protocol header. */
+enum virtchnl_proto_hdr_field {
+	/* ETHER */
+	VIRTCHNL_PROTO_HDR_ETH_SRC =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
+	VIRTCHNL_PROTO_HDR_ETH_DST,
+	VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
+	/* S-VLAN */
+	VIRTCHNL_PROTO_HDR_S_VLAN_ID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
+	/* C-VLAN */
+	VIRTCHNL_PROTO_HDR_C_VLAN_ID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
+	/* IPV4 */
+	VIRTCHNL_PROTO_HDR_IPV4_SRC =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
+	VIRTCHNL_PROTO_HDR_IPV4_DST,
+	VIRTCHNL_PROTO_HDR_IPV4_DSCP,
+	VIRTCHNL_PROTO_HDR_IPV4_TTL,
+	VIRTCHNL_PROTO_HDR_IPV4_PROT,
+	VIRTCHNL_PROTO_HDR_IPV4_CHKSUM,
+	/* IPV6 */
+	VIRTCHNL_PROTO_HDR_IPV6_SRC =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
+	VIRTCHNL_PROTO_HDR_IPV6_DST,
+	VIRTCHNL_PROTO_HDR_IPV6_TC,
+	VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
+	VIRTCHNL_PROTO_HDR_IPV6_PROT,
+	/* IPV6 Prefix */
+	VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_SRC,
+	VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_DST,
+	VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_SRC,
+	VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_DST,
+	VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_SRC,
+	VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_DST,
+	VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_SRC,
+	VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_DST,
+	VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC,
+	VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST,
+	VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_SRC,
+	VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_DST,
+	/* TCP */
+	VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
+	VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
+	VIRTCHNL_PROTO_HDR_TCP_CHKSUM,
+	/* UDP */
+	VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
+	VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
+	VIRTCHNL_PROTO_HDR_UDP_CHKSUM,
+	/* SCTP */
+	VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
+	VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
+	VIRTCHNL_PROTO_HDR_SCTP_CHKSUM,
+	/* GTPU_IP */
+	VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
+	/* GTPU_EH */
+	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
+	VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
+	/* PPPOE */
+	VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
+	/* L2TPV3 */
+	VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
+	/* ESP */
+	VIRTCHNL_PROTO_HDR_ESP_SPI =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
+	/* AH */
+	VIRTCHNL_PROTO_HDR_AH_SPI =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
+	/* PFCP */
+	VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
+	VIRTCHNL_PROTO_HDR_PFCP_SEID,
+	/* GTPC */
+	VIRTCHNL_PROTO_HDR_GTPC_TEID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC),
+	/* ECPRI */
+	VIRTCHNL_PROTO_HDR_ECPRI_MSG_TYPE =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ECPRI),
+	VIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID,
+	/* IPv4 Dummy Fragment */
+	VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4_FRAG),
+	/* IPv6 Extension Fragment */
+	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG),
+	/* GTPU_DWN/UP */
+	VIRTCHNL_PROTO_HDR_GTPU_DWN_QFI =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN),
+	VIRTCHNL_PROTO_HDR_GTPU_UP_QFI =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP),
+	/* L2TPv2 */
+	VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID =
+		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV2),
+	VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID,
+};
+
+struct virtchnl_proto_hdr {
+	/* see enum virtchnl_proto_hdr_type */
+	s32 type;
+	u32 field_selector; /* a bit mask to select field for header type */
+	u8 buffer[64];
+	/**
+	 * binary buffer in network order for specific header type.
+	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
+	 * header is expected to be copied into the buffer.
+	 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
+
+struct virtchnl_proto_hdrs {
+	u8 tunnel_level;
+	/**
+	 * specifies where the protocol headers start from;
+	 * must be 0 when sending a raw packet request.
+	 * 0 - from the outer layer
+	 * 1 - from the first inner layer
+	 * 2 - from the second inner layer
+	 * ....
+	 */
+	int count;
+	/**
+	 * number of proto layers; must be < VIRTCHNL_MAX_NUM_PROTO_HDRS;
+	 * must be 0 for a raw packet request.
+	 */
+	union {
+		struct virtchnl_proto_hdr
+			proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct {
+			u16 pkt_len;
+			u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+			u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+		} raw;
+	};
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
+
+struct virtchnl_rss_cfg {
+	struct virtchnl_proto_hdrs proto_hdrs;	   /* protocol headers */
+
+	/* see enum virtchnl_rss_algorithm; rss algorithm type */
+	s32 rss_algorithm;
+	u8 reserved[128];                          /* reserve for future */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
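+
+/* Illustrative sketch (not part of the interface definition): an RSS
+ * configuration hashing on IPv4 source/destination addresses plus TCP ports
+ * could be built with the helper macros defined above and sent with the RSS
+ * configuration opcode (e.g. VIRTCHNL_OP_ADD_RSS_CFG, defined elsewhere in
+ * this header):
+ *
+ *	struct virtchnl_rss_cfg cfg = {0};
+ *	struct virtchnl_proto_hdr *hdr;
+ *
+ *	cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+ *	cfg.proto_hdrs.tunnel_level = 0;
+ *	cfg.proto_hdrs.count = 2;
+ *
+ *	hdr = &cfg.proto_hdrs.proto_hdr[0];
+ *	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+ *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+ *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+ *
+ *	hdr = &cfg.proto_hdrs.proto_hdr[1];
+ *	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+ *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+ *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+ */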
+
+/* action configuration for FDIR */
+struct virtchnl_filter_action {
+	/* see enum virtchnl_action type */
+	s32 type;
+	union {
+		/* used for queue and qgroup action */
+		struct {
+			u16 index;
+			u8 region;
+		} queue;
+		/* used for count action */
+		struct {
+			/* share counter ID with other flow rules */
+			u8 shared;
+			u32 id; /* counter ID */
+		} count;
+		/* used for mark action */
+		u32 mark_id;
+		u8 reserve[32];
+	} act_conf;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
+
+#define VIRTCHNL_MAX_NUM_ACTIONS  8
+
+struct virtchnl_filter_action_set {
+	/* action number must be less than VIRTCHNL_MAX_NUM_ACTIONS */
+	int count;
+	struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
+
+/* pattern and action for FDIR rule */
+struct virtchnl_fdir_rule {
+	struct virtchnl_proto_hdrs proto_hdrs;
+	struct virtchnl_filter_action_set action_set;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
+
+/* Status returned to VF after VF requests FDIR commands
+ * VIRTCHNL_FDIR_SUCCESS
+ * VF FDIR related request was successfully completed by the PF.
+ * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
+ * OP_ADD_FDIR_FILTER request failed due to lack of hardware resources.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
+ * OP_ADD_FDIR_FILTER request failed because the rule already exists.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
+ * OP_ADD_FDIR_FILTER request failed due to a conflict with an existing rule.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
+ * OP_DEL_FDIR_FILTER request failed because this rule doesn't exist.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
+ * OP_ADD_FDIR_FILTER request failed due to parameter validation or because
+ * the HW doesn't support it.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
+ * OP_ADD/DEL_FDIR_FILTER request failed because the programming timed out.
+ *
+ * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
+ * OP_QUERY_FDIR_FILTER request failed due to parameter validation, for
+ * example, the VF queried the counter of a rule that has no counter action.
+ */
+enum virtchnl_fdir_prgm_status {
+	VIRTCHNL_FDIR_SUCCESS = 0,
+	VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
+	VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
+	VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
+	VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
+	VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
+	VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
+	VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
+};
+
+/* VIRTCHNL_OP_ADD_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only and rule_cfg. PF will return flow_id
+ * if the request is successfully done and return add_status to VF.
+ */
+struct virtchnl_fdir_add {
+	u16 vsi_id;  /* INPUT */
+	/*
+	 * 1 for validating an FDIR rule, 0 for creating an FDIR rule.
+	 * Validate and create share one opcode: VIRTCHNL_OP_ADD_FDIR_FILTER.
+	 */
+	u16 validate_only; /* INPUT */
+	u32 flow_id;       /* OUTPUT */
+	struct virtchnl_fdir_rule rule_cfg; /* INPUT */
+
+	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
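+
+/* Illustrative sketch (not part of the interface definition): a rule steering
+ * matching IPv4/TCP packets to queue 3 could be assembled roughly as below;
+ * vsi_id is a hypothetical value owned by the driver, and the message is
+ * allocated dynamically here only because the structure is large:
+ *
+ *	struct virtchnl_fdir_add *req = calloc(1, sizeof(*req));
+ *
+ *	req->vsi_id = vsi_id;
+ *	req->validate_only = 0;
+ *	req->rule_cfg.proto_hdrs.count = 2;
+ *	VIRTCHNL_SET_PROTO_HDR_TYPE(&req->rule_cfg.proto_hdrs.proto_hdr[0], IPV4);
+ *	VIRTCHNL_SET_PROTO_HDR_TYPE(&req->rule_cfg.proto_hdrs.proto_hdr[1], TCP);
+ *	(fill proto_hdr[].field_selector and buffer with the match keys)
+ *	req->rule_cfg.action_set.count = 1;
+ *	req->rule_cfg.action_set.actions[0].type = VIRTCHNL_ACTION_QUEUE;
+ *	req->rule_cfg.action_set.actions[0].act_conf.queue.index = 3;
+ *	(send VIRTCHNL_OP_ADD_FDIR_FILTER with req, sizeof(*req))
+ */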
+
+/* VIRTCHNL_OP_DEL_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF will return del_status to VF.
+ */
+struct virtchnl_fdir_del {
+	u16 vsi_id;  /* INPUT */
+	u16 pad;
+	u32 flow_id; /* INPUT */
+
+	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
+
+/* VIRTCHNL_OP_GET_QOS_CAPS
+ * VF sends this message to get its QoS Caps, such as
+ * TC number, Arbiter and Bandwidth.
+ */
+struct virtchnl_qos_cap_elem {
+	u8 tc_num;
+	u8 tc_prio;
+#define VIRTCHNL_ABITER_STRICT      0
+#define VIRTCHNL_ABITER_ETS         2
+	u8 arbiter;
+#define VIRTCHNL_STRICT_WEIGHT      1
+	u8 weight;
+	enum virtchnl_bw_limit_type type;
+	union {
+		struct virtchnl_shaper_bw shaper;
+		u8 pad2[32];
+	};
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem);
+
+struct virtchnl_qos_cap_list {
+	u16 vsi_id;
+	u16 num_elem;
+	struct virtchnl_qos_cap_elem cap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(44, virtchnl_qos_cap_list);
+
+/* VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP
+ * VF sends the virtchnl_queue_tc_mapping message to set the queue-to-TC
+ * mapping for all the Tx and Rx queues of a specified VSI, and receives
+ * a response with the bitmap of valid user priorities associated with
+ * the queues.
+ */
+struct virtchnl_queue_tc_mapping {
+	u16 vsi_id;
+	u16 num_tc;
+	u16 num_queue_pairs;
+	u8 pad[2];
+	union {
+		struct {
+			u16 start_queue_id;
+			u16 queue_count;
+		} req;
+		struct {
+#define VIRTCHNL_USER_PRIO_TYPE_UP	0
+#define VIRTCHNL_USER_PRIO_TYPE_DSCP	1
+			u16 prio_type;
+			u16 valid_prio_bitmap;
+		} resp;
+	} tc[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);
+
+/* VIRTCHNL_OP_CONFIG_QUEUE_BW */
+struct virtchnl_queue_bw {
+	u16 queue_id;
+	u8 tc;
+	u8 pad;
+	struct virtchnl_shaper_bw shaper;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
+
+struct virtchnl_queues_bw_cfg {
+	u16 vsi_id;
+	u16 num_queues;
+	struct virtchnl_queue_bw cfg[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queues_bw_cfg);
+
+/* queue types */
+enum virtchnl_queue_type {
+	VIRTCHNL_QUEUE_TYPE_TX			= 0,
+	VIRTCHNL_QUEUE_TYPE_RX			= 1,
+};
+
+/* structure to specify a chunk of contiguous queues */
+struct virtchnl_queue_chunk {
+	/* see enum virtchnl_queue_type */
+	s32 type;
+	u16 start_queue_id;
+	u16 num_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk);
+
+/* structure to specify several chunks of contiguous queues */
+struct virtchnl_queue_chunks {
+	u16 num_chunks;
+	u16 rsvd;
+	struct virtchnl_queue_chunk chunks[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_chunks);
+
+/* VIRTCHNL_OP_ENABLE_QUEUES_V2
+ * VIRTCHNL_OP_DISABLE_QUEUES_V2
+ *
+ * These opcodes can be used if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in
+ * VIRTCHNL_OP_GET_VF_RESOURCES
+ *
+ * VF sends the virtchnl_del_ena_dis_queues struct to specify the queues to be
+ * enabled/disabled in chunks. Also applicable to single queue RX or
+ * TX. PF performs requested action and returns status.
+ */
+struct virtchnl_del_ena_dis_queues {
+	u16 vport_id;
+	u16 pad;
+	struct virtchnl_queue_chunks chunks;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_del_ena_dis_queues);
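+
+/* Illustrative sketch (not part of the interface definition): enabling Tx
+ * queues 0-7 in a single chunk could look like this, where vport_id is a
+ * hypothetical value owned by the driver (additional chunks would require
+ * sizing the message for the flexible chunks[] array):
+ *
+ *	struct virtchnl_del_ena_dis_queues q = {0};
+ *
+ *	q.vport_id = vport_id;
+ *	q.chunks.num_chunks = 1;
+ *	q.chunks.chunks[0].type = VIRTCHNL_QUEUE_TYPE_TX;
+ *	q.chunks.chunks[0].start_queue_id = 0;
+ *	q.chunks.chunks[0].num_queues = 8;
+ *	(send VIRTCHNL_OP_ENABLE_QUEUES_V2 with &q, sizeof(q))
+ */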
+
+/* Virtchannel interrupt throttling rate index */
+enum virtchnl_itr_idx {
+	VIRTCHNL_ITR_IDX_0	= 0,
+	VIRTCHNL_ITR_IDX_1	= 1,
+	VIRTCHNL_ITR_IDX_NO_ITR	= 3,
+};
+
+/* Queue to vector mapping */
+struct virtchnl_queue_vector {
+	u16 queue_id;
+	u16 vector_id;
+	u8 pad[4];
+
+	/* see enum virtchnl_itr_idx */
+	s32 itr_idx;
+
+	/* see enum virtchnl_queue_type */
+	s32 queue_type;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector);
+
+/* VIRTCHNL_OP_MAP_QUEUE_VECTOR
+ *
+ * This opcode can be used only if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated
+ * in VIRTCHNL_OP_GET_VF_RESOURCES
+ *
+ * VF sends this message to map queues to vectors and ITR index registers.
+ * External data buffer contains virtchnl_queue_vector_maps structure
+ * that contains num_qv_maps of virtchnl_queue_vector structures.
+ * PF maps the requested queue vector maps after validating the queue and vector
+ * ids and returns a status code.
+ */
+struct virtchnl_queue_vector_maps {
+	u16 vport_id;
+	u16 num_qv_maps;
+	u8 pad[4];
+	struct virtchnl_queue_vector qv_maps[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);
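+
+/* Illustrative sketch (not part of the interface definition): mapping Rx
+ * queue 0 to MSI-X vector 1 with ITR index 0 could look like this, where
+ * vport_id is a hypothetical value owned by the driver:
+ *
+ *	struct virtchnl_queue_vector_maps maps = {0};
+ *
+ *	maps.vport_id = vport_id;
+ *	maps.num_qv_maps = 1;
+ *	maps.qv_maps[0].queue_type = VIRTCHNL_QUEUE_TYPE_RX;
+ *	maps.qv_maps[0].queue_id = 0;
+ *	maps.qv_maps[0].vector_id = 1;
+ *	maps.qv_maps[0].itr_idx = VIRTCHNL_ITR_IDX_0;
+ *	(send VIRTCHNL_OP_MAP_QUEUE_VECTOR with &maps, sizeof(maps))
+ */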
+
+struct virtchnl_quanta_cfg {
+	u16 quanta_size;
+	struct virtchnl_queue_chunk queue_select;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
+
+/* VIRTCHNL_VF_CAP_PTP
+ *   VIRTCHNL_OP_1588_PTP_GET_CAPS
+ *   VIRTCHNL_OP_1588_PTP_GET_TIME
+ *   VIRTCHNL_OP_1588_PTP_SET_TIME
+ *   VIRTCHNL_OP_1588_PTP_ADJ_TIME
+ *   VIRTCHNL_OP_1588_PTP_ADJ_FREQ
+ *   VIRTCHNL_OP_1588_PTP_TX_TIMESTAMP
+ *   VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS
+ *   VIRTCHNL_OP_1588_PTP_SET_PIN_CFG
+ *   VIRTCHNL_OP_1588_PTP_EXT_TIMESTAMP
+ *
+ * Support for offloading control of the device PTP hardware clock (PHC) is enabled
+ * by VIRTCHNL_VF_CAP_PTP. This capability allows a VF to request that PF
+ * enable Tx and Rx timestamps, and request access to read and/or write the
+ * PHC on the device, as well as query if the VF has direct access to the PHC
+ * time registers.
+ *
+ * The VF must set VIRTCHNL_VF_CAP_PTP in its capabilities when requesting
+ * resources. If the capability is set in reply, the VF must then send
+ * a VIRTCHNL_OP_1588_PTP_GET_CAPS request during initialization. The VF indicates
+ * what extended capabilities it wants by setting the appropriate flags in the
+ * caps field. The PF reply will indicate what features are enabled for
+ * that VF.
+ */
+#define VIRTCHNL_1588_PTP_CAP_TX_TSTAMP		BIT(0)
+#define VIRTCHNL_1588_PTP_CAP_RX_TSTAMP		BIT(1)
+#define VIRTCHNL_1588_PTP_CAP_READ_PHC		BIT(2)
+#define VIRTCHNL_1588_PTP_CAP_WRITE_PHC		BIT(3)
+#define VIRTCHNL_1588_PTP_CAP_PHC_REGS		BIT(4)
+#define VIRTCHNL_1588_PTP_CAP_PIN_CFG		BIT(5)
+
+/**
+ * virtchnl_phc_regs
+ *
+ * Structure defines how the VF should access PHC related registers. The VF
+ * must request VIRTCHNL_1588_PTP_CAP_PHC_REGS. If the VF has access to PHC
+ * registers, the PF will reply with the capability flag set, and with this
+ * structure detailing what PCIe region and what offsets to use. If direct
+ * access is not available, this entire structure is reserved and the fields
+ * will be zero.
+ *
+ * If necessary in a future extension, a separate capability mutually
+ * exclusive with VIRTCHNL_1588_PTP_CAP_PHC_REGS might be used to change the
+ * entire format of this structure within virtchnl_ptp_caps.
+ *
+ * @clock_hi: Register offset of the high 32 bits of clock time
+ * @clock_lo: Register offset of the low 32 bits of clock time
+ * @pcie_region: The PCIe region the registers are located in.
+ * @rsvd: Reserved bits for future extension
+ */
+struct virtchnl_phc_regs {
+	u32 clock_hi;
+	u32 clock_lo;
+	u8 pcie_region;
+	u8 rsvd[15];
+};
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_phc_regs);
+
+/* timestamp format enumeration
+ *
+ * VIRTCHNL_1588_PTP_TSTAMP_40BIT
+ *
+ *   This format indicates a timestamp that uses the 40bit format from the
+ *   flexible Rx descriptors. It is also the default Tx timestamp format used
+ *   today.
+ *
+ *   Such a timestamp has the following 40bit format:
+ *
+ *   *--------------------------------*-------------------------------*-----------*
+ *   | 32 bits of time in nanoseconds | 7 bits of sub-nanosecond time | valid bit |
+ *   *--------------------------------*-------------------------------*-----------*
+ *
+ *   The timestamp is passed in a u64, with the upper 24bits of the field
+ *   reserved as zero.
+ *
+ *   With this format, in order to report a full 64bit timestamp to userspace
+ *   applications, the VF is responsible for performing timestamp extension by
+ *   carefully comparing the timestamp with the PHC time. This can correctly
+ *   be achieved with a recent cached copy of the PHC time by doing delta
+ *   comparison between the 32bits of nanoseconds in the timestamp with the
+ *   lower 32 bits of the clock time. For this to work, the cached PHC time
+ *   must be from within 2^31 nanoseconds (~2.1 seconds) of when the timestamp
+ *   was captured.
+ *
+ * VIRTCHNL_1588_PTP_TSTAMP_64BIT_NS
+ *
+ *   This format indicates a timestamp that is 64 bits of nanoseconds.
+ */
+enum virtchnl_ptp_tstamp_format {
+	VIRTCHNL_1588_PTP_TSTAMP_40BIT = 0,
+	VIRTCHNL_1588_PTP_TSTAMP_64BIT_NS = 1,
+};
+
+/**
+ * virtchnl_ptp_caps
+ *
+ * Structure that defines the PTP capabilities available to the VF. The VF
+ * sends VIRTCHNL_OP_1588_PTP_GET_CAPS, and must fill in the ptp_caps field
+ * indicating what capabilities it is requesting. The PF will respond with the
+ * same message with the virtchnl_ptp_caps structure indicating what is
+ * enabled for the VF.
+ *
+ * @phc_regs: If VIRTCHNL_1588_PTP_CAP_PHC_REGS is set, contains information
+ *            on the PHC related registers available to the VF.
+ * @caps: On send, VF sets what capabilities it requests. On reply, PF
+ *        indicates what has been enabled for this VF. The PF shall not set
+ *        bits which were not requested by the VF.
+ * @max_adj: The maximum adjustment capable of being requested by
+ *           VIRTCHNL_OP_1588_PTP_ADJ_FREQ, in parts per billion. Note that 1 ppb
+ *           is approximately 65.5 scaled_ppm. The PF shall clamp any
+ *           frequency adjustment in VIRTCHNL_OP_1588_PTP_ADJ_FREQ to +/- max_adj.
+ *           Use of ppb in this field allows fitting the value into 4 bytes
+ *           instead of potentially requiring 8 if scaled_ppm units were used.
+ * @tx_tstamp_idx: The Tx timestamp index to set in the transmit descriptor
+ *                 when requesting a timestamp for an outgoing packet.
+ *                 Reserved if VIRTCHNL_1588_PTP_CAP_TX_TSTAMP is not enabled.
+ * @n_ext_ts: Number of external timestamp functions available. Reserved
+ *            if VIRTCHNL_1588_PTP_CAP_PIN_CFG is not enabled.
+ * @n_per_out: Number of periodic output functions available. Reserved if
+ *             VIRTCHNL_1588_PTP_CAP_PIN_CFG is not enabled.
+ * @n_pins: Number of physical programmable pins able to be controlled.
+ *          Reserved if VIRTCHNL_1588_PTP_CAP_PIN_CFG is not enabled.
+ * @tx_tstamp_format: Format of the Tx timestamps. Valid formats are defined
+ *                    by the virtchnl_ptp_tstamp_format enumeration. Note that Rx
+ *                    timestamps are tied to the descriptor format, and do not
+ *                    have a separate format field.
+ * @rsvd: Reserved bits for future extension.
+ *
+ * PTP capabilities
+ *
+ * VIRTCHNL_1588_PTP_CAP_TX_TSTAMP indicates that the VF can request transmit
+ * timestamps for packets in its transmit descriptors. If this is unset,
+ * transmit timestamp requests are ignored. Note that only one outstanding Tx
+ * timestamp request will be honored at a time. The PF shall handle receipt of
+ * the timestamp from the hardware, and will forward this to the VF by sending
+ * a VIRTCHNL_OP_1588_PTP_TX_TIMESTAMP message.
+ *
+ * VIRTCHNL_1588_PTP_CAP_RX_TSTAMP indicates that the VF receive queues have
+ * receive timestamps enabled in the flexible descriptors. Note that this
+ * requires a VF to also negotiate to enable advanced flexible descriptors in
+ * the receive path instead of the default legacy descriptor format.
+ *
+ * For a detailed description of the current Tx and Rx timestamp format, see
+ * the section on virtchnl_phc_tx_tstamp. Future extensions may indicate
+ * timestamp format in the capability structure.
+ *
+ * VIRTCHNL_1588_PTP_CAP_READ_PHC indicates that the VF may read the PHC time
+ * via the VIRTCHNL_OP_1588_PTP_GET_TIME command, or by directly reading PHC
+ * registers if VIRTCHNL_1588_PTP_CAP_PHC_REGS is also set.
+ *
+ * VIRTCHNL_1588_PTP_CAP_WRITE_PHC indicates that the VF may request updates
+ * to the PHC time via VIRTCHNL_OP_1588_PTP_SET_TIME,
+ * VIRTCHNL_OP_1588_PTP_ADJ_TIME, and VIRTCHNL_OP_1588_PTP_ADJ_FREQ.
+ *
+ * VIRTCHNL_1588_PTP_CAP_PHC_REGS indicates that the VF has direct access to
+ * certain PHC related registers, primarily for lower latency access to the
+ * PHC time. If this is set, the VF shall read the virtchnl_phc_regs section
+ * of the capabilities to determine the location of the clock registers. If
+ * this capability is not set, the entire 24 bytes of virtchnl_phc_regs is
+ * reserved as zero. Future extensions define alternative formats for this
+ * data, in which case they will be mutually exclusive with this capability.
+ *
+ * VIRTCHNL_1588_PTP_CAP_PIN_CFG indicates that the VF has the capability to
+ * control software defined pins. These pins can be assigned either as an
+ * input to timestamp external events, or as an output to cause a periodic
+ * signal output.
+ *
+ * Note that in the future, additional capability flags may be added which
+ * indicate additional extended support. All fields marked as reserved by this
+ * header will be set to zero. VF implementations should verify this to ensure
+ * that future extensions do not break compatibility.
+ */
+struct virtchnl_ptp_caps {
+	struct virtchnl_phc_regs phc_regs;
+	u32 caps;
+	s32 max_adj;
+	u8 tx_tstamp_idx;
+	u8 n_ext_ts;
+	u8 n_per_out;
+	u8 n_pins;
+	/* see enum virtchnl_ptp_tstamp_format */
+	u8 tx_tstamp_format;
+	u8 rsvd[11];
+};
+VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_ptp_caps);
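+
+/* Illustrative sketch (not part of this interface): a VF requesting a subset
+ * of the PTP capabilities and validating the PF reply. The req/rsp buffers
+ * and the error value are hypothetical.
+ *
+ *   struct virtchnl_ptp_caps req = { 0 };
+ *
+ *   req.caps = VIRTCHNL_1588_PTP_CAP_READ_PHC | VIRTCHNL_1588_PTP_CAP_TX_TSTAMP;
+ *   // send VIRTCHNL_OP_1588_PTP_GET_CAPS with &req; PF replies with rsp
+ *
+ *   if (rsp.caps & ~req.caps)
+ *           return -1; // PF must not grant bits that were not requested
+ */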
+
+/**
+ * virtchnl_phc_time
+ * @time: PHC time in nanoseconds
+ * @rsvd: Reserved for future extension
+ *
+ * Structure sent with VIRTCHNL_OP_1588_PTP_SET_TIME and received with
+ * VIRTCHNL_OP_1588_PTP_GET_TIME. Contains the 64bits of PHC clock time in
+ * nanoseconds.
+ *
+ * VIRTCHNL_OP_1588_PTP_SET_TIME may be sent by the VF if
+ * VIRTCHNL_1588_PTP_CAP_WRITE_PHC is set. This will request that the PHC time
+ * be set to the requested value. This operation is non-atomic and thus does
+ * not adjust for the delay between request and completion. It is recommended
+ * that the VF use VIRTCHNL_OP_1588_PTP_ADJ_TIME and
+ * VIRTCHNL_OP_1588_PTP_ADJ_FREQ when possible to steer the PHC clock.
+ *
+ * VIRTCHNL_OP_1588_PTP_GET_TIME may be sent to request the current time of
+ * the PHC. This op is available in case direct access via the PHC registers
+ * is not available.
+ */
+struct virtchnl_phc_time {
+	u64 time;
+	u8 rsvd[8];
+};
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_time);
+
+/**
+ * virtchnl_phc_adj_time
+ * @delta: offset requested to adjust clock by
+ * @rsvd: reserved for future extension
+ *
+ * Sent with VIRTCHNL_OP_1588_PTP_ADJ_TIME. Used to request an adjustment of
+ * the clock time by the provided delta, with negative values representing
+ * subtraction. VIRTCHNL_OP_1588_PTP_ADJ_TIME may not be sent unless
+ * VIRTCHNL_1588_PTP_CAP_WRITE_PHC is set.
+ *
+ * The atomicity of this operation is not guaranteed. The PF should perform an
+ * atomic update using appropriate mechanisms where possible, but the VF must
+ * not rely on it.
+ */
+struct virtchnl_phc_adj_time {
+	s64 delta;
+	u8 rsvd[8];
+};
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_adj_time);
+
+/**
+ * virtchnl_phc_adj_freq
+ * @scaled_ppm: frequency adjustment represented in scaled parts per million
+ * @rsvd: Reserved for future extension
+ *
+ * Sent with the VIRTCHNL_OP_1588_PTP_ADJ_FREQ to request an adjustment to the
+ * clock frequency. The adjustment is in scaled_ppm, which is parts per
+ * million with a 16bit binary fractional portion. 1 part per billion is
+ * approximately 65.5 scaled_ppm.
+ *
+ *  ppm = scaled_ppm / 2^16
+ *
+ *  ppb = scaled_ppm * 1000 / 2^16 or
+ *
+ *  ppb = scaled_ppm * 125 / 2^13
+ *
+ * The PF shall clamp any adjustment request to plus or minus the specified
+ * max_adj in the PTP capabilities.
+ *
+ * Requests for adjustment are always based off of nominal clock frequency and
+ * not compounding. To reset clock frequency, send a request with a scaled_ppm
+ * of 0.
+ */
+struct virtchnl_phc_adj_freq {
+	s64 scaled_ppm;
+	u8 rsvd[8];
+};
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_adj_freq);
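+
+/* Illustrative sketch (not part of this interface): converting a requested
+ * adjustment from ppb to scaled_ppm and clamping it against the max_adj (in
+ * ppb) advertised in virtchnl_ptp_caps. Variable names are hypothetical.
+ *
+ *   s64 scaled_ppm = (ppb * 65536) / 1000;          // ppb * 2^16 / 1000
+ *   s64 limit = ((s64)caps.max_adj * 65536) / 1000;
+ *
+ *   if (scaled_ppm > limit)
+ *           scaled_ppm = limit;
+ *   else if (scaled_ppm < -limit)
+ *           scaled_ppm = -limit;
+ */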
+
+/**
+ * virtchnl_phc_tx_tstamp
+ * @tstamp: timestamp value
+ * @rsvd: Reserved for future extension
+ *
+ * Sent along with VIRTCHNL_OP_1588_PTP_TX_TIMESTAMP from the PF when a Tx
+ * timestamp for the index associated with this VF in the tx_tstamp_idx field
+ * is captured by hardware.
+ *
+ * If VIRTCHNL_1588_PTP_CAP_TX_TSTAMP is set, the VF may request a timestamp
+ * for a packet in its transmit context descriptor by setting the appropriate
+ * flag and setting the timestamp index provided by the PF. On transmission,
+ * the timestamp will be captured and sent to the PF. The PF will forward this
+ * timestamp to the VF via the VIRTCHNL_OP_1588_PTP_TX_TIMESTAMP message.
+ *
+ * The timestamp format is defined by the tx_tstamp_format field of the
+ * virtchnl_ptp_caps structure.
+ */
+struct virtchnl_phc_tx_tstamp {
+	u64 tstamp;
+	u8 rsvd[8];
+};
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_tx_tstamp);
+
+enum virtchnl_phc_pin_func {
+	VIRTCHNL_PHC_PIN_FUNC_NONE = 0, /* Not assigned to any function */
+	VIRTCHNL_PHC_PIN_FUNC_EXT_TS = 1, /* Assigned to external timestamp */
+	VIRTCHNL_PHC_PIN_FUNC_PER_OUT = 2, /* Assigned to periodic output */
+};
+
+/* Length of the pin configuration data. All pin configurations belong within
+ * the same union and *must* have this length in bytes.
+ */
+#define VIRTCHNL_PIN_CFG_LEN 64
+
+/* virtchnl_phc_ext_ts_mode
+ *
+ * Mode of the external timestamp, indicating which edges of the input signal
+ * to timestamp.
+ */
+enum virtchnl_phc_ext_ts_mode {
+	VIRTCHNL_PHC_EXT_TS_NONE = 0,
+	VIRTCHNL_PHC_EXT_TS_RISING_EDGE = 1,
+	VIRTCHNL_PHC_EXT_TS_FALLING_EDGE = 2,
+	VIRTCHNL_PHC_EXT_TS_BOTH_EDGES = 3,
+};
+
+/**
+ * virtchnl_phc_ext_ts
+ * @mode: mode of external timestamp request
+ * @rsvd: reserved for future extension
+ *
+ * External timestamp configuration. Defines the configuration for this
+ * external timestamp function.
+ *
+ * If mode is VIRTCHNL_PHC_EXT_TS_NONE, the function is essentially disabled,
+ * timestamping nothing.
+ *
+ * If mode is VIRTCHNL_PHC_EXT_TS_RISING_EDGE, the function shall timestamp
+ * the rising edge of the input when it transitions from low to high signal.
+ *
+ * If mode is VIRTCHNL_PHC_EXT_TS_FALLING_EDGE, the function shall timestamp
+ * the falling edge of the input when it transitions from high to low signal.
+ *
+ * If mode is VIRTCHNL_PHC_EXT_TS_BOTH_EDGES, the function shall timestamp
+ * both the rising and falling edge of the signal whenever it changes.
+ *
+ * The PF shall return an error if the requested mode cannot be implemented on
+ * the function.
+ */
+struct virtchnl_phc_ext_ts {
+	u8 mode; /* see virtchnl_phc_ext_ts_mode */
+	u8 rsvd[63];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(VIRTCHNL_PIN_CFG_LEN, virtchnl_phc_ext_ts);
+
+/* virtchnl_phc_per_out_flags
+ *
+ * Flags defining periodic output functionality.
+ */
+enum virtchnl_phc_per_out_flags {
+	VIRTCHNL_PHC_PER_OUT_PHASE_START = BIT(0),
+};
+
+/**
+ * virtchnl_phc_per_out
+ * @start: absolute start time (if VIRTCHNL_PHC_PER_OUT_PHASE_START unset)
+ * @phase: phase offset to start (if VIRTCHNL_PHC_PER_OUT_PHASE_START set)
+ * @period: time to complete a full clock cycle (low -> high -> low)
+ * @on: length of time the signal should stay high
+ * @flags: flags defining the periodic output operation
+ * @rsvd: reserved for future extension
+ *
+ * Configuration for a periodic output signal. Used to define the signal that
+ * should be generated on a given function.
+ *
+ * The period field determines the full length of the clock cycle, in
+ * nanoseconds, including both the duration the signal is held high and the
+ * duration it is held low.
+ *
+ * The on field determines how long the signal should remain high. For
+ * a traditional square wave clock that is on for some duration and off for
+ * the same duration, use an on length of precisely half the period. The duty
+ * cycle of the clock is on/period.
+ *
+ * If VIRTCHNL_PHC_PER_OUT_PHASE_START is unset, then the request is to start
+ * a clock at an absolute time. This means that the clock should start
+ * precisely at the specified time in the start field. If the start time is in
+ * the past, then the periodic output should start at the next valid multiple
+ * of the period plus the start time:
+ *
+ *   new_start = (n * period) + start
+ *     (choose n such that new start is in the future)
+ *
+ * Note that the PF should not reject a start time in the past because it is
+ * possible that such a start time was valid when the request was made, but
+ * became invalid due to delay in programming the pin.
+ *
+ * If VIRTCHNL_PHC_PER_OUT_PHASE_START is set, then the request is to start
+ * at the next multiple of the period plus the phase offset. The phase must be
+ * less than the period. In this case, the clock should start as soon as
+ * possible at the next available multiple of the period. To calculate a start
+ * time when programming this mode, use:
+ *
+ *   start = (n * period) + phase
+ *     (choose n such that start is in the future)
+ *
+ * A period of zero should be treated as a request to disable the clock
+ * output.
+ */
+struct virtchnl_phc_per_out {
+	union {
+		u64 start;
+		u64 phase;
+	};
+	u64 period;
+	u64 on;
+	u32 flags;
+	u8 rsvd[36];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(VIRTCHNL_PIN_CFG_LEN, virtchnl_phc_per_out);
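+
+/* Illustrative sketch (not part of this interface): advancing an absolute
+ * start time that is already in the past to the next valid multiple of the
+ * period, as described above. Assumes a non-zero period; variable names are
+ * hypothetical.
+ *
+ *   if (start < now) {
+ *           u64 n = ((now - start) / period) + 1;
+ *
+ *           start += n * period;  // first multiple of the period in the future
+ *   }
+ */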
+
+/* virtchnl_phc_pin_cfg_flags
+ *
+ * Definition of bits in the flags field of the virtchnl_phc_pin_cfg
+ * structure.
+ */
+enum virtchnl_phc_pin_cfg_flags {
+	/* Valid for VIRTCHNL_OP_1588_PTP_SET_PIN_CFG. If set, indicates this
+	 * is a request to verify if the function can be assigned to the
+	 * provided pin. In this case, the ext_ts and per_out fields are
+	 * ignored, and the PF response must be an error if the pin cannot be
+	 * assigned to that function index.
+	 */
+	VIRTCHNL_PHC_PIN_CFG_VERIFY = BIT(0),
+};
+
+/**
+ * virtchnl_phc_set_pin
+ * @flags: control flags for the request (see virtchnl_phc_pin_cfg_flags)
+ * @pin_index: The pin to get or set
+ * @func: the function type the pin is assigned to
+ * @func_index: the index of the function the pin is assigned to
+ * @ext_ts: external timestamp configuration
+ * @per_out: periodic output configuration
+ * @rsvd1: Reserved for future extension
+ * @rsvd2: Reserved for future extension
+ *
+ * Sent along with the VIRTCHNL_OP_1588_PTP_SET_PIN_CFG op.
+ *
+ * The VF issues a VIRTCHNL_OP_1588_PTP_SET_PIN_CFG to assign the pin to one
+ * of the functions. It must set the pin_index field, the func field, and
+ * the func_index field. The pin_index must be less than n_pins, and the
+ * func_index must be less than the n_ext_ts or n_per_out depending on which
+ * function type is selected. If func is for an external timestamp, the
+ * ext_ts field must be filled in with the desired configuration. Similarly,
+ * if the function is for a periodic output, the per_out field must be
+ * configured.
+ *
+ * If the VIRTCHNL_PHC_PIN_CFG_VERIFY bit of the flag field is set, this is
+ * a request only to verify the configuration, not to set it. In this case,
+ * the PF should simply report an error if the requested pin cannot be
+ * assigned to the requested function. This allows the VF to determine whether or
+ * not a given function can be assigned to a specific pin. Other flag bits are
+ * currently reserved and must be verified as zero on both sides. They may be
+ * extended in the future.
+ */
+struct virtchnl_phc_set_pin {
+	u32 flags; /* see virtchnl_phc_pin_cfg_flags */
+	u8 pin_index;
+	u8 func; /* see virtchnl_phc_pin_func */
+	u8 func_index;
+	u8 rsvd1;
+	union {
+		struct virtchnl_phc_ext_ts ext_ts;
+		struct virtchnl_phc_per_out per_out;
+	};
+	u8 rsvd2[8];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(80, virtchnl_phc_set_pin);
+
+/**
+ * virtchnl_phc_pin
+ * @pin_index: The pin to get or set
+ * @func: the function type the pin is assigned to
+ * @func_index: the index of the function the pin is assigned to
+ * @rsvd: Reserved for future extension
+ * @name: human readable pin name, supplied by PF on GET_PIN_CFGS
+ *
+ * Sent by the PF as part of the VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS response.
+ *
+ * The VF issues a VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS request to the PF in
+ * order to obtain the current pin configuration for all of the pins that were
+ * assigned to this VF.
+ *
+ * This structure details the pin configuration state, including a pin name
+ * and which function is assigned to the pin currently.
+ */
+struct virtchnl_phc_pin {
+	u8 pin_index;
+	u8 func; /* see virtchnl_phc_pin_func */
+	u8 func_index;
+	u8 rsvd[5];
+	char name[64];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_phc_pin);
+
+/**
+ * virtchnl_phc_get_pins
+ * @len: number of pin configurations in the variable length array
+ * @rsvd: reserved for future extension
+ * @pins: variable length pin configuration array
+ *
+ * Variable length structure sent by the PF in reply to
+ * VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS. The VF does not send this structure with
+ * its request for this operation.
+ *
+ * It is possible that the PF may need to send more pin configuration data
+ * than can be sent in one virtchnl message. To handle this, the PF should
+ * issue multiple VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS responses. Each response
+ * will indicate the number of pins it covers. The VF should be ready to wait
+ * for multiple responses until it has received a total number of pins equal
+ * to the n_pins negotiated during the extended PTP capabilities exchange (see
+ * the illustrative sketch after the structure below).
+ */
+struct virtchnl_phc_get_pins {
+	u8 len;
+	u8 rsvd[7];
+	struct virtchnl_phc_pin pins[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(80, virtchnl_phc_get_pins);
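+
+/* Illustrative sketch (not part of this interface): accumulating pin
+ * configurations that may be split across several
+ * VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS responses. The pins[] array, n_pins and
+ * n_rcvd variables are hypothetical.
+ *
+ *   struct virtchnl_phc_get_pins *rsp = (struct virtchnl_phc_get_pins *)msg;
+ *   u8 i;
+ *
+ *   for (i = 0; i < rsp->len && n_rcvd < n_pins; i++)
+ *           pins[n_rcvd++] = rsp->pins[i];
+ *   // complete once n_rcvd equals the n_pins from virtchnl_ptp_caps
+ */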
+
+/**
+ * virtchnl_phc_ext_stamp
+ * @tstamp: timestamp value
+ * @tstamp_rsvd: Reserved for future extension of the timestamp value.
+ * @tstamp_format: format of the timestamp
+ * @func_index: external timestamp function this timestamp is for
+ * @rsvd2: Reserved for future extension
+ *
+ * Sent along with the VIRTCHNL_OP_1588_PTP_EXT_TIMESTAMP from the PF when an
+ * external timestamp function is triggered.
+ *
+ * This will be sent only if one of the external timestamp functions is
+ * configured by the VF, and is only valid if VIRTCHNL_1588_PTP_CAP_PIN_CFG is
+ * negotiated with the PF.
+ *
+ * The timestamp format is defined by the tstamp_format field using the
+ * virtchnl_ptp_tstamp_format enumeration. The tstamp_rsvd field is
+ * exclusively reserved for possible future variants of the timestamp format,
+ * and its access will be controlled by the tstamp_format field.
+ */
+struct virtchnl_phc_ext_tstamp {
+	u64 tstamp;
+	u8 tstamp_rsvd[8];
+	u8 tstamp_format;
+	u8 func_index;
+	u8 rsvd2[6];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_phc_ext_tstamp);
+
+/* Since VF messages are limited by u16 size, precalculate the maximum possible
+ * values of nested elements in virtchnl structures that virtual channel can
+ * possibly handle in a single message.
+ */
+enum virtchnl_vector_limits {
+	VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX	=
+		((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /
+		sizeof(struct virtchnl_queue_pair_info),
+
+	VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX		=
+		((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /
+		sizeof(struct virtchnl_vector_map),
+
+	VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX	=
+		((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /
+		sizeof(struct virtchnl_ether_addr),
+
+	VIRTCHNL_OP_ADD_DEL_VLAN_MAX		=
+		((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /
+		sizeof(u16),
+
+
+	VIRTCHNL_OP_ENABLE_CHANNELS_MAX		=
+		((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
+		sizeof(struct virtchnl_channel_info),
+
+	VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX	=
+		((u16)(~0) - sizeof(struct virtchnl_del_ena_dis_queues)) /
+		sizeof(struct virtchnl_queue_chunk),
+
+	VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX	=
+		((u16)(~0) - sizeof(struct virtchnl_queue_vector_maps)) /
+		sizeof(struct virtchnl_queue_vector),
+
+	VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX		=
+		((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list_v2)) /
+		sizeof(struct virtchnl_vlan_filter),
+};
+
+/**
+ * virtchnl_vc_validate_vf_msg
+ * @ver: Virtchnl version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg format against struct for each opcode
+ */
+static inline int
+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+			    u8 *msg, u16 msglen)
+{
+	bool err_msg_format = false;
+	u32 valid_len = 0;
+
+	/* Validate message length. */
+	switch (v_opcode) {
+	case VIRTCHNL_OP_VERSION:
+		valid_len = sizeof(struct virtchnl_version_info);
+		break;
+	case VIRTCHNL_OP_RESET_VF:
+		break;
+	case VIRTCHNL_OP_GET_VF_RESOURCES:
+		if (VF_IS_V11(ver))
+			valid_len = sizeof(u32);
+		break;
+	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+		valid_len = sizeof(struct virtchnl_txq_info);
+		break;
+	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+		valid_len = sizeof(struct virtchnl_rxq_info);
+		break;
+	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+		valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+		if (msglen >= valid_len) {
+			struct virtchnl_vsi_queue_config_info *vqc =
+			    (struct virtchnl_vsi_queue_config_info *)msg;
+
+			if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs >
+			    VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += (vqc->num_queue_pairs *
+				      sizeof(struct
+					     virtchnl_queue_pair_info));
+		}
+		break;
+	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		valid_len = sizeof(struct virtchnl_irq_map_info);
+		if (msglen >= valid_len) {
+			struct virtchnl_irq_map_info *vimi =
+			    (struct virtchnl_irq_map_info *)msg;
+
+			if (vimi->num_vectors == 0 || vimi->num_vectors >
+			    VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += (vimi->num_vectors *
+				      sizeof(struct virtchnl_vector_map));
+		}
+		break;
+	case VIRTCHNL_OP_ENABLE_QUEUES:
+	case VIRTCHNL_OP_DISABLE_QUEUES:
+		valid_len = sizeof(struct virtchnl_queue_select);
+		break;
+	case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
+		break;
+	case VIRTCHNL_OP_ADD_ETH_ADDR:
+	case VIRTCHNL_OP_DEL_ETH_ADDR:
+		valid_len = sizeof(struct virtchnl_ether_addr_list);
+		if (msglen >= valid_len) {
+			struct virtchnl_ether_addr_list *veal =
+			    (struct virtchnl_ether_addr_list *)msg;
+
+			if (veal->num_elements == 0 || veal->num_elements >
+			    VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += veal->num_elements *
+			    sizeof(struct virtchnl_ether_addr);
+		}
+		break;
+	case VIRTCHNL_OP_ADD_VLAN:
+	case VIRTCHNL_OP_DEL_VLAN:
+		valid_len = sizeof(struct virtchnl_vlan_filter_list);
+		if (msglen >= valid_len) {
+			struct virtchnl_vlan_filter_list *vfl =
+			    (struct virtchnl_vlan_filter_list *)msg;
+
+			if (vfl->num_elements == 0 || vfl->num_elements >
+			    VIRTCHNL_OP_ADD_DEL_VLAN_MAX) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += vfl->num_elements * sizeof(u16);
+		}
+		break;
+	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+		valid_len = sizeof(struct virtchnl_promisc_info);
+		break;
+	case VIRTCHNL_OP_GET_STATS:
+		valid_len = sizeof(struct virtchnl_queue_select);
+		break;
+	case VIRTCHNL_OP_CONFIG_RSS_KEY:
+		valid_len = sizeof(struct virtchnl_rss_key);
+		if (msglen >= valid_len) {
+			struct virtchnl_rss_key *vrk =
+				(struct virtchnl_rss_key *)msg;
+
+			if (vrk->key_len == 0) {
+				/* zero length is allowed as input */
+				break;
+			}
+
+			valid_len += vrk->key_len - 1;
+		}
+		break;
+	case VIRTCHNL_OP_CONFIG_RSS_LUT:
+		valid_len = sizeof(struct virtchnl_rss_lut);
+		if (msglen >= valid_len) {
+			struct virtchnl_rss_lut *vrl =
+				(struct virtchnl_rss_lut *)msg;
+
+			if (vrl->lut_entries == 0) {
+				/* zero entries is allowed as input */
+				break;
+			}
+
+			valid_len += vrl->lut_entries - 1;
+		}
+		break;
+	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+		break;
+	case VIRTCHNL_OP_SET_RSS_HENA:
+		valid_len = sizeof(struct virtchnl_rss_hena);
+		break;
+	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+		break;
+	case VIRTCHNL_OP_REQUEST_QUEUES:
+		valid_len = sizeof(struct virtchnl_vf_res_request);
+		break;
+	case VIRTCHNL_OP_ENABLE_CHANNELS:
+		valid_len = sizeof(struct virtchnl_tc_info);
+		if (msglen >= valid_len) {
+			struct virtchnl_tc_info *vti =
+				(struct virtchnl_tc_info *)msg;
+
+			if (vti->num_tc == 0 || vti->num_tc >
+			    VIRTCHNL_OP_ENABLE_CHANNELS_MAX) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += (vti->num_tc - 1) *
+				     sizeof(struct virtchnl_channel_info);
+		}
+		break;
+	case VIRTCHNL_OP_DISABLE_CHANNELS:
+		break;
+	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+		valid_len = sizeof(struct virtchnl_filter);
+		break;
+	case VIRTCHNL_OP_ADD_RSS_CFG:
+	case VIRTCHNL_OP_DEL_RSS_CFG:
+		valid_len = sizeof(struct virtchnl_rss_cfg);
+		break;
+	case VIRTCHNL_OP_ADD_FDIR_FILTER:
+		valid_len = sizeof(struct virtchnl_fdir_add);
+		break;
+	case VIRTCHNL_OP_DEL_FDIR_FILTER:
+		valid_len = sizeof(struct virtchnl_fdir_del);
+		break;
+	case VIRTCHNL_OP_GET_QOS_CAPS:
+		break;
+	case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP:
+		valid_len = sizeof(struct virtchnl_queue_tc_mapping);
+		if (msglen >= valid_len) {
+			struct virtchnl_queue_tc_mapping *q_tc =
+				(struct virtchnl_queue_tc_mapping *)msg;
+			if (q_tc->num_tc == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (q_tc->num_tc - 1) *
+					 sizeof(q_tc->tc[0]);
+		}
+		break;
+	case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+		valid_len = sizeof(struct virtchnl_queues_bw_cfg);
+		if (msglen >= valid_len) {
+			struct virtchnl_queues_bw_cfg *q_bw =
+				(struct virtchnl_queues_bw_cfg *)msg;
+			if (q_bw->num_queues == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (q_bw->num_queues - 1) *
+					 sizeof(q_bw->cfg[0]);
+		}
+		break;
+	case VIRTCHNL_OP_CONFIG_QUANTA:
+		valid_len = sizeof(struct virtchnl_quanta_cfg);
+		if (msglen >= valid_len) {
+			struct virtchnl_quanta_cfg *q_quanta =
+				(struct virtchnl_quanta_cfg *)msg;
+			if (q_quanta->quanta_size == 0 ||
+			    q_quanta->queue_select.num_queues == 0) {
+				err_msg_format = true;
+				break;
+			}
+		}
+		break;
+	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+		break;
+	case VIRTCHNL_OP_ADD_VLAN_V2:
+	case VIRTCHNL_OP_DEL_VLAN_V2:
+		valid_len = sizeof(struct virtchnl_vlan_filter_list_v2);
+		if (msglen >= valid_len) {
+			struct virtchnl_vlan_filter_list_v2 *vfl =
+			    (struct virtchnl_vlan_filter_list_v2 *)msg;
+
+			if (vfl->num_elements == 0 || vfl->num_elements >
+			    VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX) {
+				err_msg_format = true;
+				break;
+			}
+
+			valid_len += (vfl->num_elements - 1) *
+				sizeof(struct virtchnl_vlan_filter);
+		}
+		break;
+	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+	case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2:
+	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
+		valid_len = sizeof(struct virtchnl_vlan_setting);
+		break;
+	case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+		valid_len = sizeof(struct virtchnl_ptp_caps);
+		break;
+	case VIRTCHNL_OP_1588_PTP_GET_TIME:
+	case VIRTCHNL_OP_1588_PTP_SET_TIME:
+		valid_len = sizeof(struct virtchnl_phc_time);
+		break;
+	case VIRTCHNL_OP_1588_PTP_ADJ_TIME:
+		valid_len = sizeof(struct virtchnl_phc_adj_time);
+		break;
+	case VIRTCHNL_OP_1588_PTP_ADJ_FREQ:
+		valid_len = sizeof(struct virtchnl_phc_adj_freq);
+		break;
+	case VIRTCHNL_OP_1588_PTP_TX_TIMESTAMP:
+		valid_len = sizeof(struct virtchnl_phc_tx_tstamp);
+		break;
+	case VIRTCHNL_OP_1588_PTP_SET_PIN_CFG:
+		valid_len = sizeof(struct virtchnl_phc_set_pin);
+		break;
+	case VIRTCHNL_OP_1588_PTP_GET_PIN_CFGS:
+		break;
+	case VIRTCHNL_OP_1588_PTP_EXT_TIMESTAMP:
+		valid_len = sizeof(struct virtchnl_phc_ext_tstamp);
+		break;
+	case VIRTCHNL_OP_ENABLE_QUEUES_V2:
+	case VIRTCHNL_OP_DISABLE_QUEUES_V2:
+		valid_len = sizeof(struct virtchnl_del_ena_dis_queues);
+		if (msglen >= valid_len) {
+			struct virtchnl_del_ena_dis_queues *qs =
+				(struct virtchnl_del_ena_dis_queues *)msg;
+			if (qs->chunks.num_chunks == 0 ||
+			    qs->chunks.num_chunks > VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (qs->chunks.num_chunks - 1) *
+				      sizeof(struct virtchnl_queue_chunk);
+		}
+		break;
+	case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
+		valid_len = sizeof(struct virtchnl_queue_vector_maps);
+		if (msglen >= valid_len) {
+			struct virtchnl_queue_vector_maps *v_qp =
+				(struct virtchnl_queue_vector_maps *)msg;
+			if (v_qp->num_qv_maps == 0 ||
+			    v_qp->num_qv_maps > VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (v_qp->num_qv_maps - 1) *
+				      sizeof(struct virtchnl_queue_vector);
+		}
+		break;
+	/* These are always errors coming from the VF. */
+	case VIRTCHNL_OP_EVENT:
+	case VIRTCHNL_OP_UNKNOWN:
+	default:
+		return VIRTCHNL_STATUS_ERR_PARAM;
+	}
+	/* few more checks */
+	if (err_msg_format || valid_len != msglen)
+		return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
+
+	return 0;
+}
+#endif /* _VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/virtchnl2.h b/drivers/common/idpf/virtchnl2.h
new file mode 100644
index 0000000000..d496f2388e
--- /dev/null
+++ b/drivers/common/idpf/virtchnl2.h
@@ -0,0 +1,1462 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL2_H_
+#define _VIRTCHNL2_H_
+
+/* All opcodes associated with virtchnl 2 are prefixed with virtchnl2 or
+ * VIRTCHNL2. Any future opcodes, offloads/capabilities, structures,
+ * and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion.
+ */
+
+#include "virtchnl2_lan_desc.h"
+
+/* Error Codes
+ * Note that many older versions of various iAVF drivers convert the reported
+ * status code directly into an iavf_status enumeration. For this reason, it
+ * is important that the values of these enumerations line up.
+ */
+#define		VIRTCHNL2_STATUS_SUCCESS		0
+#define		VIRTCHNL2_STATUS_ERR_PARAM		-5
+#define		VIRTCHNL2_STATUS_ERR_OPCODE_MISMATCH	-38
+
+/* These macros are used to generate compilation errors if a structure/union
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure/union is not of the correct size, otherwise it creates an enum
+ * that is never used.
+ */
+#define VIRTCHNL2_CHECK_STRUCT_LEN(n, X) enum virtchnl2_static_assert_enum_##X \
+	{ virtchnl2_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+#define VIRTCHNL2_CHECK_UNION_LEN(n, X) enum virtchnl2_static_assert_enum_##X \
+	{ virtchnl2_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
+
+/* This is a new major set of opcodes, leaving room for old miscellaneous
+ * opcodes to be added in the future. These opcodes may only be used if both
+ * the PF and VF have successfully negotiated the VIRTCHNL version as 2.0
+ * during the VIRTCHNL2_OP_VERSION exchange.
+ */
+#define		VIRTCHNL2_OP_UNKNOWN			0
+#define		VIRTCHNL2_OP_VERSION			1
+#define		VIRTCHNL2_OP_GET_CAPS			500
+#define		VIRTCHNL2_OP_CREATE_VPORT		501
+#define		VIRTCHNL2_OP_DESTROY_VPORT		502
+#define		VIRTCHNL2_OP_ENABLE_VPORT		503
+#define		VIRTCHNL2_OP_DISABLE_VPORT		504
+#define		VIRTCHNL2_OP_CONFIG_TX_QUEUES		505
+#define		VIRTCHNL2_OP_CONFIG_RX_QUEUES		506
+#define		VIRTCHNL2_OP_ENABLE_QUEUES		507
+#define		VIRTCHNL2_OP_DISABLE_QUEUES		508
+#define		VIRTCHNL2_OP_ADD_QUEUES			509
+#define		VIRTCHNL2_OP_DEL_QUEUES			510
+#define		VIRTCHNL2_OP_MAP_QUEUE_VECTOR		511
+#define		VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR		512
+#define		VIRTCHNL2_OP_GET_RSS_KEY		513
+#define		VIRTCHNL2_OP_SET_RSS_KEY		514
+#define		VIRTCHNL2_OP_GET_RSS_LUT		515
+#define		VIRTCHNL2_OP_SET_RSS_LUT		516
+#define		VIRTCHNL2_OP_GET_RSS_HASH		517
+#define		VIRTCHNL2_OP_SET_RSS_HASH		518
+#define		VIRTCHNL2_OP_SET_SRIOV_VFS		519
+#define		VIRTCHNL2_OP_ALLOC_VECTORS		520
+#define		VIRTCHNL2_OP_DEALLOC_VECTORS		521
+#define		VIRTCHNL2_OP_EVENT			522
+#define		VIRTCHNL2_OP_GET_STATS			523
+#define		VIRTCHNL2_OP_RESET_VF			524
+	/* opcode 525 is reserved */
+#define		VIRTCHNL2_OP_GET_PTYPE_INFO		526
+	/* opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and
+	 * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW
+	 */
+	/* opcodes 529, 530, and 531 are reserved */
+#define		VIRTCHNL2_OP_CREATE_ADI			532
+#define		VIRTCHNL2_OP_DESTROY_ADI		533
+#define		VIRTCHNL2_OP_LOOPBACK			534
+#define		VIRTCHNL2_OP_ADD_MAC_ADDR		535
+#define		VIRTCHNL2_OP_DEL_MAC_ADDR		536
+#define		VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE	537
+
+#define VIRTCHNL2_RDMA_INVALID_QUEUE_IDX	0xFFFF
+
+/* VIRTCHNL2_VPORT_TYPE
+ * Type of virtual port
+ */
+#define VIRTCHNL2_VPORT_TYPE_DEFAULT		0
+#define VIRTCHNL2_VPORT_TYPE_SRIOV		1
+#define VIRTCHNL2_VPORT_TYPE_SIOV		2
+#define VIRTCHNL2_VPORT_TYPE_SUBDEV		3
+#define VIRTCHNL2_VPORT_TYPE_MNG		4
+
+/* VIRTCHNL2_QUEUE_MODEL
+ * Type of queue model
+ *
+ * In the single queue model, the same transmit descriptor queue is used by
+ * software to post descriptors to hardware and by hardware to post completed
+ * descriptors to software.
+ * Likewise, the same receive descriptor queue is used by hardware to post
+ * completions to software and by software to post buffers to hardware.
+ */
+#define VIRTCHNL2_QUEUE_MODEL_SINGLE		0
+/* In the split queue model, hardware uses transmit completion queues to post
+ * descriptor/buffer completions to software, while software uses transmit
+ * descriptor queues to post descriptors to hardware.
+ * Likewise, hardware posts descriptor completions to the receive descriptor
+ * queue, while software uses receive buffer queues to post buffers to hardware.
+ */
+#define VIRTCHNL2_QUEUE_MODEL_SPLIT		1
+
+/* VIRTCHNL2_CHECKSUM_OFFLOAD_CAPS
+ * Checksum offload capability flags
+ */
+#define VIRTCHNL2_CAP_TX_CSUM_L3_IPV4		BIT(0)
+#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP	BIT(1)
+#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP	BIT(2)
+#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP	BIT(3)
+#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP	BIT(4)
+#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP	BIT(5)
+#define VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP	BIT(6)
+#define VIRTCHNL2_CAP_TX_CSUM_GENERIC		BIT(7)
+#define VIRTCHNL2_CAP_RX_CSUM_L3_IPV4		BIT(8)
+#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP	BIT(9)
+#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP	BIT(10)
+#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP	BIT(11)
+#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP	BIT(12)
+#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP	BIT(13)
+#define VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP	BIT(14)
+#define VIRTCHNL2_CAP_RX_CSUM_GENERIC		BIT(15)
+#define VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL	BIT(16)
+#define VIRTCHNL2_CAP_TX_CSUM_L3_DOUBLE_TUNNEL	BIT(17)
+#define VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL	BIT(18)
+#define VIRTCHNL2_CAP_RX_CSUM_L3_DOUBLE_TUNNEL	BIT(19)
+#define VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL	BIT(20)
+#define VIRTCHNL2_CAP_TX_CSUM_L4_DOUBLE_TUNNEL	BIT(21)
+#define VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL	BIT(22)
+#define VIRTCHNL2_CAP_RX_CSUM_L4_DOUBLE_TUNNEL	BIT(23)
+
+/* VIRTCHNL2_SEGMENTATION_OFFLOAD_CAPS
+ * Segmentation offload capability flags
+ */
+#define VIRTCHNL2_CAP_SEG_IPV4_TCP		BIT(0)
+#define VIRTCHNL2_CAP_SEG_IPV4_UDP		BIT(1)
+#define VIRTCHNL2_CAP_SEG_IPV4_SCTP		BIT(2)
+#define VIRTCHNL2_CAP_SEG_IPV6_TCP		BIT(3)
+#define VIRTCHNL2_CAP_SEG_IPV6_UDP		BIT(4)
+#define VIRTCHNL2_CAP_SEG_IPV6_SCTP		BIT(5)
+#define VIRTCHNL2_CAP_SEG_GENERIC		BIT(6)
+#define VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL	BIT(7)
+#define VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL	BIT(8)
+
+/* VIRTCHNL2_RSS_FLOW_TYPE_CAPS
+ * Receive Side Scaling Flow type capability flags
+ */
+#define VIRTCHNL2_CAP_RSS_IPV4_TCP		BIT(0)
+#define VIRTCHNL2_CAP_RSS_IPV4_UDP		BIT(1)
+#define VIRTCHNL2_CAP_RSS_IPV4_SCTP		BIT(2)
+#define VIRTCHNL2_CAP_RSS_IPV4_OTHER		BIT(3)
+#define VIRTCHNL2_CAP_RSS_IPV6_TCP		BIT(4)
+#define VIRTCHNL2_CAP_RSS_IPV6_UDP		BIT(5)
+#define VIRTCHNL2_CAP_RSS_IPV6_SCTP		BIT(6)
+#define VIRTCHNL2_CAP_RSS_IPV6_OTHER		BIT(7)
+#define VIRTCHNL2_CAP_RSS_IPV4_AH		BIT(8)
+#define VIRTCHNL2_CAP_RSS_IPV4_ESP		BIT(9)
+#define VIRTCHNL2_CAP_RSS_IPV4_AH_ESP		BIT(10)
+#define VIRTCHNL2_CAP_RSS_IPV6_AH		BIT(11)
+#define VIRTCHNL2_CAP_RSS_IPV6_ESP		BIT(12)
+#define VIRTCHNL2_CAP_RSS_IPV6_AH_ESP		BIT(13)
+
+/* VIRTCHNL2_HEADER_SPLIT_CAPS
+ * Header split capability flags
+ */
+/* for prepended metadata  */
+#define VIRTCHNL2_CAP_RX_HSPLIT_AT_L2		BIT(0)
+/* all VLANs go into header buffer */
+#define VIRTCHNL2_CAP_RX_HSPLIT_AT_L3		BIT(1)
+#define VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4		BIT(2)
+#define VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6		BIT(3)
+
+/* VIRTCHNL2_RSC_OFFLOAD_CAPS
+ * Receive Side Coalescing offload capability flags
+ */
+#define VIRTCHNL2_CAP_RSC_IPV4_TCP		BIT(0)
+#define VIRTCHNL2_CAP_RSC_IPV4_SCTP		BIT(1)
+#define VIRTCHNL2_CAP_RSC_IPV6_TCP		BIT(2)
+#define VIRTCHNL2_CAP_RSC_IPV6_SCTP		BIT(3)
+
+/* VIRTCHNL2_OTHER_CAPS
+ * Other capability flags
+ * SPLITQ_QSCHED: Queue based scheduling using split queue model
+ * TX_VLAN: VLAN tag insertion
+ * RX_VLAN: VLAN tag stripping
+ */
+#define VIRTCHNL2_CAP_RDMA			BIT(0)
+#define VIRTCHNL2_CAP_SRIOV			BIT(1)
+#define VIRTCHNL2_CAP_MACFILTER			BIT(2)
+#define VIRTCHNL2_CAP_FLOW_DIRECTOR		BIT(3)
+#define VIRTCHNL2_CAP_SPLITQ_QSCHED		BIT(4)
+#define VIRTCHNL2_CAP_CRC			BIT(5)
+#define VIRTCHNL2_CAP_ADQ			BIT(6)
+#define VIRTCHNL2_CAP_WB_ON_ITR			BIT(7)
+#define VIRTCHNL2_CAP_PROMISC			BIT(8)
+#define VIRTCHNL2_CAP_LINK_SPEED		BIT(9)
+#define VIRTCHNL2_CAP_INLINE_IPSEC		BIT(10)
+#define VIRTCHNL2_CAP_LARGE_NUM_QUEUES		BIT(11)
+/* require additional info */
+#define VIRTCHNL2_CAP_VLAN			BIT(12)
+#define VIRTCHNL2_CAP_PTP			BIT(13)
+#define VIRTCHNL2_CAP_ADV_RSS			BIT(15)
+#define VIRTCHNL2_CAP_FDIR			BIT(16)
+#define VIRTCHNL2_CAP_RX_FLEX_DESC		BIT(17)
+#define VIRTCHNL2_CAP_PTYPE			BIT(18)
+#define VIRTCHNL2_CAP_LOOPBACK			BIT(19)
+#define VIRTCHNL2_CAP_OEM			BIT(20)
+
+/* VIRTCHNL2_DEVICE_TYPE */
+/* underlying device type */
+#define VIRTCHNL2_MEV_DEVICE			0
+
+/* VIRTCHNL2_TXQ_SCHED_MODE
+ * Transmit Queue Scheduling Modes - Queue mode is the legacy mode, i.e. in-order
+ * completions where descriptors and buffers are completed at the same time.
+ * Flow scheduling mode allows for out of order packet processing where
+ * descriptors are cleaned in order, but buffers can be completed out of order.
+ */
+#define VIRTCHNL2_TXQ_SCHED_MODE_QUEUE		0
+#define VIRTCHNL2_TXQ_SCHED_MODE_FLOW		1
+
+/* VIRTCHNL2_TXQ_FLAGS
+ * Transmit Queue feature flags
+ *
+ * Enable rule miss completion type; packet completion for a packet
+ * sent on exception path; only relevant in flow scheduling mode
+ */
+#define VIRTCHNL2_TXQ_ENABLE_MISS_COMPL		BIT(0)
+
+/* VIRTCHNL2_PEER_TYPE
+ * Transmit mailbox peer type
+ */
+#define VIRTCHNL2_RDMA_CPF			0
+#define VIRTCHNL2_NVME_CPF			1
+#define VIRTCHNL2_ATE_CPF			2
+#define VIRTCHNL2_LCE_CPF			3
+
+/* VIRTCHNL2_RXQ_FLAGS
+ * Receive Queue Feature flags
+ */
+#define VIRTCHNL2_RXQ_RSC			BIT(0)
+#define VIRTCHNL2_RXQ_HDR_SPLIT			BIT(1)
+/* When set, packet descriptors are flushed by hardware immediately after
+ * processing each packet.
+ */
+#define VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK	BIT(2)
+#define VIRTCHNL2_RX_DESC_SIZE_16BYTE		BIT(3)
+#define VIRTCHNL2_RX_DESC_SIZE_32BYTE		BIT(4)
+
+/* VIRTCHNL2_RSS_ALGORITHM
+ * Type of RSS algorithm
+ */
+#define VIRTCHNL2_RSS_ALG_TOEPLITZ_ASYMMETRIC		0
+#define VIRTCHNL2_RSS_ALG_R_ASYMMETRIC			1
+#define VIRTCHNL2_RSS_ALG_TOEPLITZ_SYMMETRIC		2
+#define VIRTCHNL2_RSS_ALG_XOR_SYMMETRIC			3
+
+/* VIRTCHNL2_EVENT_CODES
+ * Type of event
+ */
+#define VIRTCHNL2_EVENT_UNKNOWN			0
+#define VIRTCHNL2_EVENT_LINK_CHANGE		1
+/* These messages are only sent to PF from CP */
+#define VIRTCHNL2_EVENT_START_RESET_ADI		2
+#define VIRTCHNL2_EVENT_FINISH_RESET_ADI	3
+
+/* VIRTCHNL2_QUEUE_TYPE
+ * Transmit and Receive queue types are valid in legacy as well as split queue
+ * models. With the split queue model, 2 additional types are introduced:
+ * TX_COMPLETION and RX_BUFFER. In the split queue model, the receive queue
+ * corresponds to the queue where hardware posts completions.
+ */
+#define VIRTCHNL2_QUEUE_TYPE_TX			0
+#define VIRTCHNL2_QUEUE_TYPE_RX			1
+#define VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION	2
+#define VIRTCHNL2_QUEUE_TYPE_RX_BUFFER		3
+#define VIRTCHNL2_QUEUE_TYPE_CONFIG_TX		4
+#define VIRTCHNL2_QUEUE_TYPE_CONFIG_RX		5
+#define VIRTCHNL2_QUEUE_TYPE_P2P_TX		6
+#define VIRTCHNL2_QUEUE_TYPE_P2P_RX		7
+#define VIRTCHNL2_QUEUE_TYPE_P2P_TX_COMPLETION	8
+#define VIRTCHNL2_QUEUE_TYPE_P2P_RX_BUFFER	9
+#define VIRTCHNL2_QUEUE_TYPE_MBX_TX		10
+#define VIRTCHNL2_QUEUE_TYPE_MBX_RX		11
+
+/* VIRTCHNL2_ITR_IDX
+ * Virtchannel interrupt throttling rate index
+ */
+#define VIRTCHNL2_ITR_IDX_0			0
+#define VIRTCHNL2_ITR_IDX_1			1
+#define VIRTCHNL2_ITR_IDX_2			2
+#define VIRTCHNL2_ITR_IDX_NO_ITR		3
+
+/* VIRTCHNL2_VECTOR_LIMITS
+ * Since PF/VF messages are limited by __le16 size, precalculate the maximum
+ * possible values of nested elements in virtchnl structures that virtual
+ * channel can possibly handle in a single message.
+ */
+
+#define VIRTCHNL2_OP_DEL_ENABLE_DISABLE_QUEUES_MAX (\
+		((__le16)(~0) - sizeof(struct virtchnl2_del_ena_dis_queues)) / \
+		sizeof(struct virtchnl2_queue_chunk))
+
+#define VIRTCHNL2_OP_MAP_UNMAP_QUEUE_VECTOR_MAX (\
+		((__le16)(~0) - sizeof(struct virtchnl2_queue_vector_maps)) / \
+		sizeof(struct virtchnl2_queue_vector))
+
+/* VIRTCHNL2_MAC_TYPE
+ * VIRTCHNL2_MAC_ADDR_PRIMARY
+ * PF/VF driver should set @type to VIRTCHNL2_MAC_ADDR_PRIMARY for the
+ * primary/device unicast MAC address filter for VIRTCHNL2_OP_ADD_MAC_ADDR and
+ * VIRTCHNL2_OP_DEL_MAC_ADDR. This allows for the underlying control plane
+ * function to accurately track the MAC address and for VM/function reset.
+ *
+ * VIRTCHNL2_MAC_ADDR_EXTRA
+ * PF/VF driver should set @type to VIRTCHNL2_MAC_ADDR_EXTRA for any extra
+ * unicast and/or multicast filters that are being added/deleted via
+ * VIRTCHNL2_OP_ADD_MAC_ADDR/VIRTCHNL2_OP_DEL_MAC_ADDR respectively.
+ */
+#define VIRTCHNL2_MAC_ADDR_PRIMARY		1
+#define VIRTCHNL2_MAC_ADDR_EXTRA		2
+
+/* VIRTCHNL2_PROMISC_FLAGS
+ * Flags used for promiscuous mode
+ */
+#define VIRTCHNL2_UNICAST_PROMISC		BIT(0)
+#define VIRTCHNL2_MULTICAST_PROMISC		BIT(1)
+
+/* VIRTCHNL2_PROTO_HDR_TYPE
+ * Protocol header type within a packet segment. A segment consists of one or
+ * more protocol headers that make up a logical group of protocol headers. Each
+ * logical group of protocol headers encapsulates or is encapsulated using/by
+ * tunneling or encapsulation protocols for network virtualization.
+ */
+/* VIRTCHNL2_PROTO_HDR_ANY is a mandatory protocol id */
+#define VIRTCHNL2_PROTO_HDR_ANY			0
+#define VIRTCHNL2_PROTO_HDR_PRE_MAC		1
+/* VIRTCHNL2_PROTO_HDR_MAC is a mandatory protocol id */
+#define VIRTCHNL2_PROTO_HDR_MAC			2
+#define VIRTCHNL2_PROTO_HDR_POST_MAC		3
+#define VIRTCHNL2_PROTO_HDR_ETHERTYPE		4
+#define VIRTCHNL2_PROTO_HDR_VLAN		5
+#define VIRTCHNL2_PROTO_HDR_SVLAN		6
+#define VIRTCHNL2_PROTO_HDR_CVLAN		7
+#define VIRTCHNL2_PROTO_HDR_MPLS		8
+#define VIRTCHNL2_PROTO_HDR_UMPLS		9
+#define VIRTCHNL2_PROTO_HDR_MMPLS		10
+#define VIRTCHNL2_PROTO_HDR_PTP			11
+#define VIRTCHNL2_PROTO_HDR_CTRL		12
+#define VIRTCHNL2_PROTO_HDR_LLDP		13
+#define VIRTCHNL2_PROTO_HDR_ARP			14
+#define VIRTCHNL2_PROTO_HDR_ECP			15
+#define VIRTCHNL2_PROTO_HDR_EAPOL		16
+#define VIRTCHNL2_PROTO_HDR_PPPOD		17
+#define VIRTCHNL2_PROTO_HDR_PPPOE		18
+/* VIRTCHNL2_PROTO_HDR_IPV4 is a mandatory protocol id */
+#define VIRTCHNL2_PROTO_HDR_IPV4		19
+/* IPv4 and IPv6 Fragment header types are only associated to
+ * VIRTCHNL2_PROTO_HDR_IPV4 and VIRTCHNL2_PROTO_HDR_IPV6 respectively,
+ * cannot be used independently.
+ */
+/* VIRTCHNL2_PROTO_HDR_IPV4_FRAG is a mandatory protocol id */
+#define VIRTCHNL2_PROTO_HDR_IPV4_FRAG		20
+/* VIRTCHNL2_PROTO_HDR_IPV6 is a mandatory protocol id */
+#define VIRTCHNL2_PROTO_HDR_IPV6		21
+/* VIRTCHNL2_PROTO_HDR_IPV6_FRAG is a mandatory protocol id */
+#define VIRTCHNL2_PROTO_HDR_IPV6_FRAG		22
+#define VIRTCHNL2_PROTO_HDR_IPV6_EH		23
+/* VIRTCHNL2_PROTO_HDR_UDP is a mandatory protocol id */
+#define VIRTCHNL2_PROTO_HDR_UDP			24
+/* VIRTCHNL2_PROTO_HDR_TCP is a mandatory protocol id */
+#define VIRTCHNL2_PROTO_HDR_TCP			25
+/* VIRTCHNL2_PROTO_HDR_SCTP is a mandatory protocol id */
+#define VIRTCHNL2_PROTO_HDR_SCTP		26
+/* VIRTCHNL2_PROTO_HDR_ICMP is a mandatory protocol id */
+#define VIRTCHNL2_PROTO_HDR_ICMP		27
+/* VIRTCHNL2_PROTO_HDR_ICMPV6 is a mandatory protocol id */
+#define VIRTCHNL2_PROTO_HDR_ICMPV6		28
+#define VIRTCHNL2_PROTO_HDR_IGMP		29
+#define VIRTCHNL2_PROTO_HDR_AH			30
+#define VIRTCHNL2_PROTO_HDR_ESP			31
+#define VIRTCHNL2_PROTO_HDR_IKE			32
+#define VIRTCHNL2_PROTO_HDR_NATT_KEEP		33
+/* VIRTCHNL2_PROTO_HDR_PAY is a mandatory protocol id */
+#define VIRTCHNL2_PROTO_HDR_PAY			34
+#define VIRTCHNL2_PROTO_HDR_L2TPV2		35
+#define VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL	36
+#define VIRTCHNL2_PROTO_HDR_L2TPV3		37
+#define VIRTCHNL2_PROTO_HDR_GTP			38
+#define VIRTCHNL2_PROTO_HDR_GTP_EH		39
+#define VIRTCHNL2_PROTO_HDR_GTPCV2		40
+#define VIRTCHNL2_PROTO_HDR_GTPC_TEID		41
+#define VIRTCHNL2_PROTO_HDR_GTPU		42
+#define VIRTCHNL2_PROTO_HDR_GTPU_UL		43
+#define VIRTCHNL2_PROTO_HDR_GTPU_DL		44
+#define VIRTCHNL2_PROTO_HDR_ECPRI		45
+#define VIRTCHNL2_PROTO_HDR_VRRP		46
+#define VIRTCHNL2_PROTO_HDR_OSPF		47
+/* VIRTCHNL2_PROTO_HDR_TUN is a mandatory protocol id */
+#define VIRTCHNL2_PROTO_HDR_TUN			48
+#define VIRTCHNL2_PROTO_HDR_GRE			49
+#define VIRTCHNL2_PROTO_HDR_NVGRE		50
+#define VIRTCHNL2_PROTO_HDR_VXLAN		51
+#define VIRTCHNL2_PROTO_HDR_VXLAN_GPE		52
+#define VIRTCHNL2_PROTO_HDR_GENEVE		53
+#define VIRTCHNL2_PROTO_HDR_NSH			54
+#define VIRTCHNL2_PROTO_HDR_QUIC		55
+#define VIRTCHNL2_PROTO_HDR_PFCP		56
+#define VIRTCHNL2_PROTO_HDR_PFCP_NODE		57
+#define VIRTCHNL2_PROTO_HDR_PFCP_SESSION	58
+#define VIRTCHNL2_PROTO_HDR_RTP			59
+#define VIRTCHNL2_PROTO_HDR_ROCE		60
+#define VIRTCHNL2_PROTO_HDR_ROCEV1		61
+#define VIRTCHNL2_PROTO_HDR_ROCEV2		62
+/* protocol ids up to 32767 are reserved for AVF use */
+/* 32768 - 65534 are used for user defined protocol ids */
+/* VIRTCHNL2_PROTO_HDR_NO_PROTO is a mandatory protocol id */
+#define VIRTCHNL2_PROTO_HDR_NO_PROTO		65535
+
+#define VIRTCHNL2_VERSION_MAJOR_2        2
+#define VIRTCHNL2_VERSION_MINOR_0        0
+
+
+/* VIRTCHNL2_OP_VERSION
+ * VF posts its version number to the CP. CP responds with its version number
+ * in the same format, along with a return code.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This version opcode MUST always be specified as == 1, regardless of other
+ * changes in the API. The CP must always respond to this message without
+ * error regardless of version mismatch.
+ */
+struct virtchnl2_version_info {
+	u32 major;
+	u32 minor;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
+
+/* VIRTCHNL2_OP_GET_CAPS
+ * Dataplane driver sends this message to CP to negotiate capabilities and
+ * provides a virtchnl2_get_capabilities structure with its desired
+ * capabilities, max_sriov_vfs and num_allocated_vectors.
+ * CP responds with a virtchnl2_get_capabilities structure updated
+ * with allowed capabilities and the other fields as below.
+ * If PF sets max_sriov_vfs as 0, CP will respond with max number of VFs
+ * that can be created by this PF. For any other value 'n', CP responds
+ * with max_sriov_vfs set to min(n, x) where x is the max number of VFs
+ * allowed by CP's policy. max_sriov_vfs is not applicable for VFs.
+ * If the dataplane driver sets num_allocated_vectors as 0, CP will respond
+ * with 1, which is the default vector associated with the default mailbox.
+ * For any other value 'n', CP responds with a value <= n based on the CP's
+ * policy of max number of vectors for a PF.
+ * CP will respond with the vector ID of the mailbox allocated to the PF in
+ * mailbox_vector_id and the number of itr index registers in itr_idx_map.
+ * It also responds with the default number of vports that the dataplane
+ * driver should come up with in default_num_vports and the maximum number of
+ * vports that can be supported in max_vports (see the illustrative sketch
+ * after the structure below).
+ */
+struct virtchnl2_get_capabilities {
+	/* see VIRTCHNL2_CHECKSUM_OFFLOAD_CAPS definitions */
+	__le32 csum_caps;
+
+	/* see VIRTCHNL2_SEGMENTATION_OFFLOAD_CAPS definitions */
+	__le32 seg_caps;
+
+	/* see VIRTCHNL2_HEADER_SPLIT_CAPS definitions */
+	__le32 hsplit_caps;
+
+	/* see VIRTCHNL2_RSC_OFFLOAD_CAPS definitions */
+	__le32 rsc_caps;
+
+	/* see VIRTCHNL2_RSS_FLOW_TYPE_CAPS definitions  */
+	__le64 rss_caps;
+
+
+	/* see VIRTCHNL2_OTHER_CAPS definitions  */
+	__le64 other_caps;
+
+	/* DYN_CTL register offset and vector id for mailbox provided by CP */
+	__le32 mailbox_dyn_ctl;
+	__le16 mailbox_vector_id;
+	/* Maximum number of allocated vectors for the device */
+	__le16 num_allocated_vectors;
+
+	/* Maximum number of queues that can be supported */
+	__le16 max_rx_q;
+	__le16 max_tx_q;
+	__le16 max_rx_bufq;
+	__le16 max_tx_complq;
+
+	/* The PF sends the maximum VFs it is requesting. The CP responds with
+	 * the maximum VFs granted.
+	 */
+	__le16 max_sriov_vfs;
+
+	/* maximum number of vports that can be supported */
+	__le16 max_vports;
+	/* default number of vports driver should allocate on load */
+	__le16 default_num_vports;
+
+	/* Max header length hardware can parse/checksum, in bytes */
+	__le16 max_tx_hdr_size;
+
+	/* Max number of scatter gather buffers that can be sent per transmit
+	 * packet without needing to be linearized
+	 */
+	u8 max_sg_bufs_per_tx_pkt;
+
+	/* see VIRTCHNL2_ITR_IDX definition */
+	u8 itr_idx_map;
+
+	__le16 pad1;
+
+	/* version of Control Plane that is running */
+	__le16 oem_cp_ver_major;
+	__le16 oem_cp_ver_minor;
+	/* see VIRTCHNL2_DEVICE_TYPE definitions */
+	__le32 device_type;
+
+	u8 reserved[12];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities);
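+
+/* Illustrative sketch (not part of this interface): a dataplane driver
+ * filling a VIRTCHNL2_OP_GET_CAPS request following the rules above.
+ * Endianness conversion is omitted for brevity and the values are examples
+ * only.
+ *
+ *   struct virtchnl2_get_capabilities caps = { 0 };
+ *
+ *   caps.csum_caps = VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
+ *                    VIRTCHNL2_CAP_RX_CSUM_L3_IPV4;
+ *   caps.other_caps = VIRTCHNL2_CAP_SPLITQ_QSCHED;
+ *   caps.max_sriov_vfs = 0;          // ask CP for the maximum VFs allowed
+ *   caps.num_allocated_vectors = 0;  // CP replies with the default (1)
+ *   // send with VIRTCHNL2_OP_GET_CAPS; CP returns the granted capabilities
+ */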
+
+struct virtchnl2_queue_reg_chunk {
+	/* see VIRTCHNL2_QUEUE_TYPE definitions */
+	__le32 type;
+	__le32 start_queue_id;
+	__le32 num_queues;
+	__le32 pad;
+
+	/* Queue tail register offset and spacing provided by CP */
+	__le64 qtail_reg_start;
+	__le32 qtail_reg_spacing;
+
+	u8 reserved[4];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk);
+
+/* structure to specify several chunks of contiguous queues */
+struct virtchnl2_queue_reg_chunks {
+	__le16 num_chunks;
+	u8 reserved[6];
+	struct virtchnl2_queue_reg_chunk chunks[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_queue_reg_chunks);
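+
+/* Illustrative sketch (not part of this interface): computing the byte length
+ * of a message that ends in a flexible virtchnl2_queue_reg_chunks array,
+ * mirroring the "array of 1" layout used throughout this file. num_chunks is
+ * hypothetical.
+ *
+ *   u16 num_chunks = 4;
+ *   u32 len = sizeof(struct virtchnl2_queue_reg_chunks) +
+ *             (num_chunks - 1) * sizeof(struct virtchnl2_queue_reg_chunk);
+ */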
+
+#define VIRTCHNL2_ETH_LENGTH_OF_ADDRESS  6
+
+/* VIRTCHNL2_OP_CREATE_VPORT
+ * PF sends this message to CP to create a vport by filling in required
+ * fields of virtchnl2_create_vport structure.
+ * CP responds with the updated virtchnl2_create_vport structure containing the
+ * necessary fields followed by chunks which in turn will have an array of
+ * num_chunks entries of virtchnl2_queue_reg_chunk structures.
+ */
+struct virtchnl2_create_vport {
+	/* PF/VF populates the following fields on request */
+	/* see VIRTCHNL2_VPORT_TYPE definitions */
+	__le16 vport_type;
+
+	/* see VIRTCHNL2_QUEUE_MODEL definitions */
+	__le16 txq_model;
+
+	/* see VIRTCHNL2_QUEUE_MODEL definitions */
+	__le16 rxq_model;
+	__le16 num_tx_q;
+	/* valid only if txq_model is split queue */
+	__le16 num_tx_complq;
+	__le16 num_rx_q;
+	/* valid only if rxq_model is split queue */
+	__le16 num_rx_bufq;
+	/* relative receive queue index to be used as default */
+	__le16 default_rx_q;
+	/* used to align PF and CP in case of default multiple vports, it is
+	 * filled by the PF and CP returns the same value, to enable the driver
+	 * to support multiple asynchronous parallel CREATE_VPORT requests and
+	 * associate a response to a specific request
+	 */
+	__le16 vport_index;
+
+	/* CP populates the following fields on response */
+	__le16 max_mtu;
+	__le32 vport_id;
+	u8 default_mac_addr[VIRTCHNL2_ETH_LENGTH_OF_ADDRESS];
+	__le16 pad;
+	/* see VIRTCHNL2_RX_DESC_IDS definitions */
+	__le64 rx_desc_ids;
+	/* see VIRTCHNL2_TX_DESC_IDS definitions */
+	__le64 tx_desc_ids;
+
+#define MAX_Q_REGIONS 16
+	__le32 max_qs_per_qregion[MAX_Q_REGIONS];
+	__le32 qregion_total_qs;
+	__le16 qregion_type;
+	__le16 pad2;
+
+	/* see VIRTCHNL2_RSS_ALGORITHM definitions */
+	__le32 rss_algorithm;
+	__le16 rss_key_size;
+	__le16 rss_lut_size;
+
+	/* see VIRTCHNL2_HEADER_SPLIT_CAPS definitions */
+	__le32 rx_split_pos;
+
+	u8 reserved[20];
+	struct virtchnl2_queue_reg_chunks chunks;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(192, virtchnl2_create_vport);
+
+/* VIRTCHNL2_OP_DESTROY_VPORT
+ * VIRTCHNL2_OP_ENABLE_VPORT
+ * VIRTCHNL2_OP_DISABLE_VPORT
+ * PF sends this message to CP to destroy, enable or disable a vport by filling
+ * in the vport_id in virtchnl2_vport structure.
+ * CP responds with the status of the requested operation.
+ */
+struct virtchnl2_vport {
+	__le32 vport_id;
+	u8 reserved[4];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_vport);
+
+/* Transmit queue config info */
+struct virtchnl2_txq_info {
+	__le64 dma_ring_addr;
+
+	/* see VIRTCHNL2_QUEUE_TYPE definitions */
+	__le32 type;
+
+	__le32 queue_id;
+	/* valid only if queue model is split and type is transmit queue. Used
+	 * in many to one mapping of transmit queues to completion queue
+	 */
+	__le16 relative_queue_id;
+
+	/* see VIRTCHNL2_QUEUE_MODEL definitions */
+	__le16 model;
+
+	/* see VIRTCHNL2_TXQ_SCHED_MODE definitions */
+	__le16 sched_mode;
+
+	/* see VIRTCHNL2_TXQ_FLAGS definitions */
+	__le16 qflags;
+	__le16 ring_len;
+
+	/* valid only if queue model is split and type is transmit queue */
+	__le16 tx_compl_queue_id;
+	/* valid only if queue type is VIRTCHNL2_QUEUE_TYPE_MBX_TX */
+	/* see VIRTCHNL2_PEER_TYPE definitions */
+	__le16 peer_type;
+	/* valid only if queue type is CONFIG_TX and used to deliver messages
+	 * for the respective CONFIG_TX queue
+	 */
+	__le16 peer_rx_queue_id;
+
+	/* value ranges from 0 to 15 */
+	__le16 qregion_id;
+	u8 pad[2];
+
+	/* Egress pasid is used for SIOV use case */
+	__le32 egress_pasid;
+	__le32 egress_hdr_pasid;
+	__le32 egress_buf_pasid;
+
+	u8 reserved[8];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_txq_info);
+
+/* VIRTCHNL2_OP_CONFIG_TX_QUEUES
+ * PF sends this message to set up parameters for one or more transmit queues.
+ * This message contains an array of num_qinfo instances of virtchnl2_txq_info
+ * structures. CP configures requested queues and returns a status code. If
+ * num_qinfo specified is greater than the number of queues associated with the
+ * vport, an error is returned and no queues are configured.
+ */
+struct virtchnl2_config_tx_queues {
+	__le32 vport_id;
+	__le16 num_qinfo;
+
+	u8 reserved[10];
+	struct virtchnl2_txq_info qinfo[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(72, virtchnl2_config_tx_queues);
+
+/* Receive queue config info */
+struct virtchnl2_rxq_info {
+	/* see VIRTCHNL2_RX_DESC_IDS definitions */
+	__le64 desc_ids;
+	__le64 dma_ring_addr;
+
+	/* see VIRTCHNL2_QUEUE_TYPE definitions */
+	__le32 type;
+	__le32 queue_id;
+
+	/* see QUEUE_MODEL definitions */
+	__le16 model;
+
+	__le16 hdr_buffer_size;
+	__le32 data_buffer_size;
+	__le32 max_pkt_size;
+
+	__le16 ring_len;
+	u8 buffer_notif_stride;
+	u8 pad[1];
+
+	/* Applicable only for receive buffer queues */
+	__le64 dma_head_wb_addr;
+
+	/* Applicable only for receive completion queues */
+	/* see VIRTCHNL2_RXQ_FLAGS definitions */
+	__le16 qflags;
+
+	__le16 rx_buffer_low_watermark;
+
+	/* valid only in split queue model */
+	__le16 rx_bufq1_id;
+	/* valid only in split queue model */
+	__le16 rx_bufq2_id;
+	/* it indicates if there is a second buffer, rx_bufq2_id is valid only
+	 * if this field is set
+	 */
+	u8 bufq2_ena;
+	u8 pad2;
+
+	/* value ranges from 0 to 15 */
+	__le16 qregion_id;
+
+	/* Ingress pasid is used for SIOV use case */
+	__le32 ingress_pasid;
+	__le32 ingress_hdr_pasid;
+	__le32 ingress_buf_pasid;
+
+	u8 reserved[16];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_rxq_info);
+
+/* VIRTCHNL2_OP_CONFIG_RX_QUEUES
+ * PF sends this message to set up parameters for one or more receive queues.
+ * This message contains an array of num_qinfo instances of virtchnl2_rxq_info
+ * structures. CP configures requested queues and returns a status code.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the vport, an error is returned and no queues are configured.
+ */
+struct virtchnl2_config_rx_queues {
+	__le32 vport_id;
+	__le16 num_qinfo;
+
+	u8 reserved[18];
+	struct virtchnl2_rxq_info qinfo[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(112, virtchnl2_config_rx_queues);
+
+/* VIRTCHNL2_OP_ADD_QUEUES
+ * PF sends this message to request additional transmit/receive queues beyond
+ * the ones that were assigned via CREATE_VPORT request. virtchnl2_add_queues
+ * structure is used to specify the number of each type of queues.
+ * CP responds with the same structure with the actual number of queues assigned
+ * followed by num_chunks of virtchnl2_queue_reg_chunk structures.
+ */
+struct virtchnl2_add_queues {
+	__le32 vport_id;
+	__le16 num_tx_q;
+	__le16 num_tx_complq;
+	__le16 num_rx_q;
+	__le16 num_rx_bufq;
+	u8 reserved[4];
+	struct virtchnl2_queue_reg_chunks chunks;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_add_queues);
+
+/* Structure to specify a chunk of contiguous interrupt vectors */
+struct virtchnl2_vector_chunk {
+	__le16 start_vector_id;
+	__le16 start_evv_id;
+	__le16 num_vectors;
+	__le16 pad1;
+
+	/* Register offsets and spacing provided by CP.
+	 * dynamic control registers are used for enabling/disabling/re-enabling
+	 * interrupts and updating interrupt rates in the hotpath. Any changes
+	 * to interrupt rates in the dynamic control registers will be reflected
+	 * in the interrupt throttling rate registers.
+	 * itrn registers are used to update interrupt rates for specific
+	 * interrupt indices without modifying the state of the interrupt.
+	 */
+	__le32 dynctl_reg_start;
+	__le32 dynctl_reg_spacing;
+
+	__le32 itrn_reg_start;
+	__le32 itrn_reg_spacing;
+	u8 reserved[8];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk);
+
+/* Structure to specify several chunks of contiguous interrupt vectors */
+struct virtchnl2_vector_chunks {
+	__le16 num_vchunks;
+	u8 reserved[14];
+	struct virtchnl2_vector_chunk vchunks[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(48, virtchnl2_vector_chunks);
+
+/* VIRTCHNL2_OP_ALLOC_VECTORS
+ * PF sends this message to request additional interrupt vectors beyond the
+ * ones that were assigned via GET_CAPS request. virtchnl2_alloc_vectors
+ * structure is used to specify the number of vectors requested. CP responds
+ * with the same structure with the actual number of vectors assigned followed
+ * by virtchnl2_vector_chunks structure identifying the vector ids.
+ */
+struct virtchnl2_alloc_vectors {
+	__le16 num_vectors;
+	u8 reserved[14];
+	struct virtchnl2_vector_chunks vchunks;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(64, virtchnl2_alloc_vectors);
+
+/* VIRTCHNL2_OP_DEALLOC_VECTORS
+ * PF sends this message to release the vectors.
+ * PF sends virtchnl2_vector_chunks struct to specify the vectors it is giving
+ * away. CP performs requested action and returns status.
+ */
+
+/* VIRTCHNL2_OP_GET_RSS_LUT
+ * VIRTCHNL2_OP_SET_RSS_LUT
+ * PF sends this message to get or set RSS lookup table. Only supported if
+ * both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
+ * negotiation. Uses the virtchnl2_rss_lut structure
+ */
+struct virtchnl2_rss_lut {
+	__le32 vport_id;
+	__le16 lut_entries_start;
+	__le16 lut_entries;
+	u8 reserved[4];
+	__le32 lut[1]; /* RSS lookup table */
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_lut);
+
+/* VIRTCHNL2_OP_GET_RSS_KEY
+ * PF sends this message to get RSS key. Only supported if both PF and CP
+ * drivers set the VIRTCHNL2_CAP_RSS bit during configuration negotiation. Uses
+ * the virtchnl2_rss_key structure
+ */
+
+/* VIRTCHNL2_OP_GET_RSS_HASH
+ * VIRTCHNL2_OP_SET_RSS_HASH
+ * PF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the CP sets these to all possible traffic types that the
+ * hardware supports. The PF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Only supported if both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit
+ * during configuration negotiation.
+ */
+struct virtchnl2_rss_hash {
+	/* Packet Type Groups bitmap */
+	__le64 ptype_groups;
+	__le32 vport_id;
+	u8 reserved[4];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_hash);
+
+/* VIRTCHNL2_OP_SET_SRIOV_VFS
+ * This message is used to set the number of SRIOV VFs to be created. The actual
+ * allocation of resources for the VFs in terms of vport, queues and interrupts
+ * is done by CP. When this call completes, the APF driver calls
+ * pci_enable_sriov to let the OS instantiate the SRIOV PCIE devices.
+ * Setting the number of VFs to 0 destroys all the VFs of this function.
+ */
+
+struct virtchnl2_sriov_vfs_info {
+	__le16 num_vfs;
+	__le16 pad;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info);
+
+/* VIRTCHNL2_OP_CREATE_ADI
+ * PF sends this message to CP to create ADI by filling in required
+ * fields of virtchnl2_create_adi structure.
+ * CP responds with the updated virtchnl2_create_adi structure containing the
+ * necessary fields followed by chunks which in turn will have an array of
+ * num_chunks entries of virtchnl2_queue_reg_chunk structures.
+ */
+struct virtchnl2_create_adi {
+	/* PF sends PASID to CP */
+	__le32 pasid;
+	/*
+	 * mbx_id is set to 1 by PF when requesting CP to provide a HW mailbox
+	 * id; otherwise it is set to 0 by PF
+	 */
+	__le16 mbx_id;
+	/* PF sends mailbox vector id to CP */
+	__le16 mbx_vec_id;
+	/* CP populates ADI id */
+	__le16 adi_id;
+	u8 reserved[64];
+	u8 pad[6];
+	/* CP populates queue chunks */
+	struct virtchnl2_queue_reg_chunks chunks;
+	/* PF sends vector chunks to CP */
+	struct virtchnl2_vector_chunks vchunks;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(168, virtchnl2_create_adi);
+
+/* VIRTCHNL2_OP_DESTROY_ADI
+ * PF sends this message to CP to destroy ADI by filling
+ * in the adi_id in virtchnl2_destroy_adi structure.
+ * CP responds with the status of the requested operation.
+ */
+struct virtchnl2_destroy_adi {
+	__le16 adi_id;
+	u8 reserved[2];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_destroy_adi);
+
+/* Based on the descriptor type the PF supports, CP fills ptype_id_10 or
+ * ptype_id_8 for flex and base descriptor respectively. If ptype_id_10 value
+ * is set to 0xFFFF, PF should consider this ptype a dummy one; it is the
+ * last ptype.
+ */
+struct virtchnl2_ptype {
+	__le16 ptype_id_10;
+	u8 ptype_id_8;
+	/* number of protocol ids the packet supports, maximum of 32
+	 * protocol ids are supported
+	 */
+	u8 proto_id_count;
+	__le16 pad;
+	/* proto_id_count decides the allocation of protocol id array */
+	/* see VIRTCHNL2_PROTO_HDR_TYPE */
+	__le16 proto_id[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptype);
+
+/* VIRTCHNL2_OP_GET_PTYPE_INFO
+ * PF sends this message to CP to get all supported packet types. It does so by
+ * filling in start_ptype_id and num_ptypes. Depending on descriptor type the
+ * PF supports, it sets num_ptypes to 1024 (10-bit ptype) for flex descriptor
+ * and 256 (8-bit ptype) for base descriptor support. CP responds back to PF by
+ * populating start_ptype_id, num_ptypes and the array of ptypes. If all the
+ * ptypes do not fit into one mailbox buffer, CP splits the ptype info into multiple
+ * messages, where each message will have the start ptype id, number of ptypes
+ * sent in that message and the ptype array itself. When CP is done updating
+ * all ptype information it extracted from the package (number of ptypes
+ * extracted might be less than what PF expects), it will append a dummy ptype
+ * (which has 'ptype_id_10' of 'struct virtchnl2_ptype' as 0xFFFF) to the ptype
+ * array. PF is expected to receive multiple VIRTCHNL2_OP_GET_PTYPE_INFO
+ * messages.
+ */
+struct virtchnl2_get_ptype_info {
+	__le16 start_ptype_id;
+	__le16 num_ptypes;
+	__le32 pad;
+	struct virtchnl2_ptype ptype[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_get_ptype_info);
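+
+/* Illustrative sketch of a hypothetical consumer: walking the variable
+ * sized ptype array of a GET_PTYPE_INFO response and stopping at the dummy
+ * entry (ptype_id_10 == 0xFFFF). Each entry occupies the base struct size
+ * plus any protocol ids beyond the first one. LE16_TO_CPU is assumed to
+ * come from idpf_osdep.h.
+ */
+static inline void
+virtchnl2_walk_ptypes(struct virtchnl2_get_ptype_info *info)
+{
+	u8 *pos = (u8 *)info->ptype;
+	u16 i;
+
+	for (i = 0; i < LE16_TO_CPU(info->num_ptypes); i++) {
+		struct virtchnl2_ptype *p = (struct virtchnl2_ptype *)pos;
+
+		if (LE16_TO_CPU(p->ptype_id_10) == 0xFFFF)
+			break; /* dummy ptype marks the end of the list */
+
+		/* consume p->proto_id[0 .. p->proto_id_count - 1] here */
+
+		pos += sizeof(*p) +
+		       (p->proto_id_count ? p->proto_id_count - 1 : 0) *
+		       sizeof(p->proto_id[0]);
+	}
+}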
+
+/* VIRTCHNL2_OP_GET_STATS
+ * PF/VF sends this message to CP to get the update stats by specifying the
+ * vport_id. CP responds with stats in struct virtchnl2_vport_stats.
+ */
+struct virtchnl2_vport_stats {
+	__le32 vport_id;
+	u8 pad[4];
+
+	__le64 rx_bytes;		/* received bytes */
+	__le64 rx_unicast;		/* received unicast pkts */
+	__le64 rx_multicast;		/* received multicast pkts */
+	__le64 rx_broadcast;		/* received broadcast pkts */
+	__le64 rx_discards;
+	__le64 rx_errors;
+	__le64 rx_unknown_protocol;
+	__le64 tx_bytes;		/* transmitted bytes */
+	__le64 tx_unicast;		/* transmitted unicast pkts */
+	__le64 tx_multicast;		/* transmitted multicast pkts */
+	__le64 tx_broadcast;		/* transmitted broadcast pkts */
+	__le64 tx_discards;
+	__le64 tx_errors;
+	__le64 rx_invalid_frame_length;
+	__le64 rx_overflow_drop;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats);
+
+/* VIRTCHNL2_OP_EVENT
+ * CP sends this message to inform the PF/VF driver of events that may affect
+ * it. No direct response is expected from the driver, though it may generate
+ * other messages in response to this one.
+ */
+struct virtchnl2_event {
+	/* see VIRTCHNL2_EVENT_CODES definitions */
+	__le32 event;
+	/* link_speed provided in Mbps */
+	__le32 link_speed;
+	__le32 vport_id;
+	u8 link_status;
+	u8 pad[1];
+	/* CP sends reset notification to PF with corresponding ADI ID */
+	__le16 adi_id;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_event);
+
+/* VIRTCHNL2_OP_GET_RSS_KEY
+ * VIRTCHNL2_OP_SET_RSS_KEY
+ * PF/VF sends this message to get or set RSS key. Only supported if both
+ * PF/VF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
+ * negotiation. Uses the virtchnl2_rss_key structure
+ */
+struct virtchnl2_rss_key {
+	__le32 vport_id;
+	__le16 key_len;
+	u8 pad;
+	u8 key[1];         /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rss_key);
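+
+/* Illustrative sketch of a hypothetical helper: total buffer length for a
+ * GET_RSS_KEY / SET_RSS_KEY message. One key byte is already part of
+ * struct virtchnl2_rss_key, so only (key_len - 1) bytes are appended,
+ * matching the check in virtchnl2_vc_validate_vf_msg() below.
+ */
+static inline u32
+virtchnl2_rss_key_msg_len(u16 key_len)
+{
+	return sizeof(struct virtchnl2_rss_key) +
+	       (key_len ? key_len - 1 : 0);
+}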
+
+/* structure to specify a chunk of contiguous queues */
+struct virtchnl2_queue_chunk {
+	/* see VIRTCHNL2_QUEUE_TYPE definitions */
+	__le32 type;
+	__le32 start_queue_id;
+	__le32 num_queues;
+	u8 reserved[4];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);
+
+/* structure to specify several chunks of contiguous queues */
+struct virtchnl2_queue_chunks {
+	__le16 num_chunks;
+	u8 reserved[6];
+	struct virtchnl2_queue_chunk chunks[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_chunks);
+
+/* VIRTCHNL2_OP_ENABLE_QUEUES
+ * VIRTCHNL2_OP_DISABLE_QUEUES
+ * VIRTCHNL2_OP_DEL_QUEUES
+ *
+ * PF sends these messages to enable, disable or delete queues specified in
+ * chunks. PF sends virtchnl2_del_ena_dis_queues struct to specify the queues
+ * to be enabled/disabled/deleted. Also applicable to single queue receive or
+ * transmit. CP performs requested action and returns status.
+ */
+struct virtchnl2_del_ena_dis_queues {
+	__le32 vport_id;
+	u8 reserved[4];
+	struct virtchnl2_queue_chunks chunks;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_del_ena_dis_queues);
+
+/* Queue to vector mapping */
+struct virtchnl2_queue_vector {
+	__le32 queue_id;
+	__le16 vector_id;
+	u8 pad[2];
+
+	/* see VIRTCHNL2_ITR_IDX definitions */
+	__le32 itr_idx;
+
+	/* see VIRTCHNL2_QUEUE_TYPE definitions */
+	__le32 queue_type;
+	u8 reserved[8];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_vector);
+
+/* VIRTCHNL2_OP_MAP_QUEUE_VECTOR
+ * VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR
+ *
+ * PF sends this message to map or unmap queues to vectors and interrupt
+ * throttling rate index registers. External data buffer contains
+ * virtchnl2_queue_vector_maps structure that contains num_qv_maps of
+ * virtchnl2_queue_vector structures. CP maps the requested queue vector maps
+ * after validating the queue and vector ids and returns a status code.
+ */
+struct virtchnl2_queue_vector_maps {
+	__le32 vport_id;
+	__le16 num_qv_maps;
+	u8 pad[10];
+	struct virtchnl2_queue_vector qv_maps[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_queue_vector_maps);
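+
+/* Illustrative sketch of a hypothetical helper: filling one Rx queue to
+ * vector map entry for a VIRTCHNL2_OP_MAP_QUEUE_VECTOR request. It relies
+ * on the VIRTCHNL2_QUEUE_TYPE and VIRTCHNL2_ITR_IDX definitions earlier in
+ * this file and on the CPU_TO_LE* helpers assumed from idpf_osdep.h.
+ */
+static inline void
+virtchnl2_fill_rxq_vector_map(struct virtchnl2_queue_vector *qv,
+			      u32 rxq_id, u16 vector_id)
+{
+	qv->queue_id = CPU_TO_LE32(rxq_id);
+	qv->vector_id = CPU_TO_LE16(vector_id);
+	qv->itr_idx = CPU_TO_LE32(VIRTCHNL2_ITR_IDX_0);
+	qv->queue_type = CPU_TO_LE32(VIRTCHNL2_QUEUE_TYPE_RX);
+}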
+
+/* VIRTCHNL2_OP_LOOPBACK
+ *
+ * PF/VF sends this message to transition to/from the loopback state. Setting
+ * 'enable' to 1 enables the loopback state and setting it to 0
+ * disables it. CP configures the state to loopback and returns status.
+ */
+struct virtchnl2_loopback {
+	__le32 vport_id;
+	u8 enable;
+	u8 pad[3];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_loopback);
+
+/* structure to specify each MAC address */
+struct virtchnl2_mac_addr {
+	u8 addr[VIRTCHNL2_ETH_LENGTH_OF_ADDRESS];
+	/* see VIRTCHNL2_MAC_TYPE definitions */
+	u8 type;
+	u8 pad;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr);
+
+/* VIRTCHNL2_OP_ADD_MAC_ADDR
+ * VIRTCHNL2_OP_DEL_MAC_ADDR
+ *
+ * PF/VF driver uses this structure to send the list of MAC addresses to be
+ * added/deleted to the CP, and the CP performs the action and returns the
+ * status.
+ */
+struct virtchnl2_mac_addr_list {
+	__le32 vport_id;
+	__le16 num_mac_addr;
+	u8 pad[2];
+	struct virtchnl2_mac_addr mac_addr_list[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_mac_addr_list);
+
+/* VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE
+ *
+ * PF/VF sends the vport id and flags to the CP, and the CP performs the action
+ * and returns the status.
+ */
+struct virtchnl2_promisc_info {
+	__le32 vport_id;
+	/* see VIRTCHNL2_PROMISC_FLAGS definitions */
+	__le16 flags;
+	u8 pad[2];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
+
+
+static inline const char *virtchnl2_op_str(__le32 v_opcode)
+{
+	switch (v_opcode) {
+	case VIRTCHNL2_OP_VERSION:
+		return "VIRTCHNL2_OP_VERSION";
+	case VIRTCHNL2_OP_GET_CAPS:
+		return "VIRTCHNL2_OP_GET_CAPS";
+	case VIRTCHNL2_OP_CREATE_VPORT:
+		return "VIRTCHNL2_OP_CREATE_VPORT";
+	case VIRTCHNL2_OP_DESTROY_VPORT:
+		return "VIRTCHNL2_OP_DESTROY_VPORT";
+	case VIRTCHNL2_OP_ENABLE_VPORT:
+		return "VIRTCHNL2_OP_ENABLE_VPORT";
+	case VIRTCHNL2_OP_DISABLE_VPORT:
+		return "VIRTCHNL2_OP_DISABLE_VPORT";
+	case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
+		return "VIRTCHNL2_OP_CONFIG_TX_QUEUES";
+	case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
+		return "VIRTCHNL2_OP_CONFIG_RX_QUEUES";
+	case VIRTCHNL2_OP_ENABLE_QUEUES:
+		return "VIRTCHNL2_OP_ENABLE_QUEUES";
+	case VIRTCHNL2_OP_DISABLE_QUEUES:
+		return "VIRTCHNL2_OP_DISABLE_QUEUES";
+	case VIRTCHNL2_OP_ADD_QUEUES:
+		return "VIRTCHNL2_OP_ADD_QUEUES";
+	case VIRTCHNL2_OP_DEL_QUEUES:
+		return "VIRTCHNL2_OP_DEL_QUEUES";
+	case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
+		return "VIRTCHNL2_OP_MAP_QUEUE_VECTOR";
+	case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
+		return "VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR";
+	case VIRTCHNL2_OP_GET_RSS_KEY:
+		return "VIRTCHNL2_OP_GET_RSS_KEY";
+	case VIRTCHNL2_OP_SET_RSS_KEY:
+		return "VIRTCHNL2_OP_SET_RSS_KEY";
+	case VIRTCHNL2_OP_GET_RSS_LUT:
+		return "VIRTCHNL2_OP_GET_RSS_LUT";
+	case VIRTCHNL2_OP_SET_RSS_LUT:
+		return "VIRTCHNL2_OP_SET_RSS_LUT";
+	case VIRTCHNL2_OP_GET_RSS_HASH:
+		return "VIRTCHNL2_OP_GET_RSS_HASH";
+	case VIRTCHNL2_OP_SET_RSS_HASH:
+		return "VIRTCHNL2_OP_SET_RSS_HASH";
+	case VIRTCHNL2_OP_SET_SRIOV_VFS:
+		return "VIRTCHNL2_OP_SET_SRIOV_VFS";
+	case VIRTCHNL2_OP_ALLOC_VECTORS:
+		return "VIRTCHNL2_OP_ALLOC_VECTORS";
+	case VIRTCHNL2_OP_DEALLOC_VECTORS:
+		return "VIRTCHNL2_OP_DEALLOC_VECTORS";
+	case VIRTCHNL2_OP_GET_PTYPE_INFO:
+		return "VIRTCHNL2_OP_GET_PTYPE_INFO";
+	case VIRTCHNL2_OP_GET_STATS:
+		return "VIRTCHNL2_OP_GET_STATS";
+	case VIRTCHNL2_OP_EVENT:
+		return "VIRTCHNL2_OP_EVENT";
+	case VIRTCHNL2_OP_RESET_VF:
+		return "VIRTCHNL2_OP_RESET_VF";
+	case VIRTCHNL2_OP_CREATE_ADI:
+		return "VIRTCHNL2_OP_CREATE_ADI";
+	case VIRTCHNL2_OP_DESTROY_ADI:
+		return "VIRTCHNL2_OP_DESTROY_ADI";
+	default:
+		return "Unsupported (update virtchnl2.h)";
+	}
+}
+
+/**
+ * virtchnl2_vc_validate_vf_msg
+ * @ver: Virtchnl2 version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg format against struct for each opcode
+ */
+static inline int
+virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u32 v_opcode,
+			     u8 *msg, __le16 msglen)
+{
+	bool err_msg_format = false;
+	__le32 valid_len = 0;
+
+	/* Validate message length. */
+	switch (v_opcode) {
+	case VIRTCHNL2_OP_VERSION:
+		valid_len = sizeof(struct virtchnl2_version_info);
+		break;
+	case VIRTCHNL2_OP_GET_CAPS:
+		valid_len = sizeof(struct virtchnl2_get_capabilities);
+		break;
+	case VIRTCHNL2_OP_CREATE_VPORT:
+		valid_len = sizeof(struct virtchnl2_create_vport);
+		if (msglen >= valid_len) {
+			struct virtchnl2_create_vport *cvport =
+				(struct virtchnl2_create_vport *)msg;
+
+			if (cvport->chunks.num_chunks == 0) {
+				/* zero chunks is allowed as input */
+				break;
+			}
+
+			valid_len += (cvport->chunks.num_chunks - 1) *
+				      sizeof(struct virtchnl2_queue_reg_chunk);
+		}
+		break;
+	case VIRTCHNL2_OP_CREATE_ADI:
+		valid_len = sizeof(struct virtchnl2_create_adi);
+		if (msglen >= valid_len) {
+			struct virtchnl2_create_adi *cadi =
+				(struct virtchnl2_create_adi *)msg;
+
+			if (cadi->chunks.num_chunks == 0) {
+				/* zero chunks is allowed as input */
+				break;
+			}
+
+			if (cadi->vchunks.num_vchunks == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (cadi->chunks.num_chunks - 1) *
+				      sizeof(struct virtchnl2_queue_reg_chunk);
+			valid_len += (cadi->vchunks.num_vchunks - 1) *
+				      sizeof(struct virtchnl2_vector_chunk);
+		}
+		break;
+	case VIRTCHNL2_OP_DESTROY_ADI:
+		valid_len = sizeof(struct virtchnl2_destroy_adi);
+		break;
+	case VIRTCHNL2_OP_DESTROY_VPORT:
+	case VIRTCHNL2_OP_ENABLE_VPORT:
+	case VIRTCHNL2_OP_DISABLE_VPORT:
+		valid_len = sizeof(struct virtchnl2_vport);
+		break;
+	case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
+		valid_len = sizeof(struct virtchnl2_config_tx_queues);
+		if (msglen >= valid_len) {
+			struct virtchnl2_config_tx_queues *ctq =
+				(struct virtchnl2_config_tx_queues *)msg;
+			if (ctq->num_qinfo == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (ctq->num_qinfo - 1) *
+				     sizeof(struct virtchnl2_txq_info);
+		}
+		break;
+	case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
+		valid_len = sizeof(struct virtchnl2_config_rx_queues);
+		if (msglen >= valid_len) {
+			struct virtchnl2_config_rx_queues *crq =
+				(struct virtchnl2_config_rx_queues *)msg;
+			if (crq->num_qinfo == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (crq->num_qinfo - 1) *
+				     sizeof(struct virtchnl2_rxq_info);
+		}
+		break;
+	case VIRTCHNL2_OP_ADD_QUEUES:
+		valid_len = sizeof(struct virtchnl2_add_queues);
+		if (msglen >= valid_len) {
+			struct virtchnl2_add_queues *add_q =
+				(struct virtchnl2_add_queues *)msg;
+
+			if (add_q->chunks.num_chunks == 0) {
+				/* zero chunks is allowed as input */
+				break;
+			}
+
+			valid_len += (add_q->chunks.num_chunks - 1) *
+				      sizeof(struct virtchnl2_queue_reg_chunk);
+		}
+		break;
+	case VIRTCHNL2_OP_ENABLE_QUEUES:
+	case VIRTCHNL2_OP_DISABLE_QUEUES:
+	case VIRTCHNL2_OP_DEL_QUEUES:
+		valid_len = sizeof(struct virtchnl2_del_ena_dis_queues);
+		if (msglen >= valid_len) {
+			struct virtchnl2_del_ena_dis_queues *qs =
+				(struct virtchnl2_del_ena_dis_queues *)msg;
+			if (qs->chunks.num_chunks == 0 ||
+			    qs->chunks.num_chunks > VIRTCHNL2_OP_DEL_ENABLE_DISABLE_QUEUES_MAX) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (qs->chunks.num_chunks - 1) *
+				      sizeof(struct virtchnl2_queue_chunk);
+		}
+		break;
+	case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
+	case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
+		valid_len = sizeof(struct virtchnl2_queue_vector_maps);
+		if (msglen >= valid_len) {
+			struct virtchnl2_queue_vector_maps *v_qp =
+				(struct virtchnl2_queue_vector_maps *)msg;
+			if (v_qp->num_qv_maps == 0 ||
+			    v_qp->num_qv_maps > VIRTCHNL2_OP_MAP_UNMAP_QUEUE_VECTOR_MAX) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (v_qp->num_qv_maps - 1) *
+				      sizeof(struct virtchnl2_queue_vector);
+		}
+		break;
+	case VIRTCHNL2_OP_ALLOC_VECTORS:
+		valid_len = sizeof(struct virtchnl2_alloc_vectors);
+		if (msglen >= valid_len) {
+			struct virtchnl2_alloc_vectors *v_av =
+				(struct virtchnl2_alloc_vectors *)msg;
+
+			if (v_av->vchunks.num_vchunks == 0) {
+				/* zero chunks is allowed as input */
+				break;
+			}
+
+			valid_len += (v_av->vchunks.num_vchunks - 1) *
+				      sizeof(struct virtchnl2_vector_chunk);
+		}
+		break;
+	case VIRTCHNL2_OP_DEALLOC_VECTORS:
+		valid_len = sizeof(struct virtchnl2_vector_chunks);
+		if (msglen >= valid_len) {
+			struct virtchnl2_vector_chunks *v_chunks =
+				(struct virtchnl2_vector_chunks *)msg;
+			if (v_chunks->num_vchunks == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (v_chunks->num_vchunks - 1) *
+				      sizeof(struct virtchnl2_vector_chunk);
+		}
+		break;
+	case VIRTCHNL2_OP_GET_RSS_KEY:
+	case VIRTCHNL2_OP_SET_RSS_KEY:
+		valid_len = sizeof(struct virtchnl2_rss_key);
+		if (msglen >= valid_len) {
+			struct virtchnl2_rss_key *vrk =
+				(struct virtchnl2_rss_key *)msg;
+
+			if (vrk->key_len == 0) {
+				/* zero length is allowed as input */
+				break;
+			}
+
+			valid_len += vrk->key_len - 1;
+		}
+		break;
+	case VIRTCHNL2_OP_GET_RSS_LUT:
+	case VIRTCHNL2_OP_SET_RSS_LUT:
+		valid_len = sizeof(struct virtchnl2_rss_lut);
+		if (msglen >= valid_len) {
+			struct virtchnl2_rss_lut *vrl =
+				(struct virtchnl2_rss_lut *)msg;
+
+			if (vrl->lut_entries == 0) {
+				/* zero entries is allowed as input */
+				break;
+			}
+
+			valid_len += (vrl->lut_entries - 1) * sizeof(vrl->lut);
+		}
+		break;
+	case VIRTCHNL2_OP_GET_RSS_HASH:
+	case VIRTCHNL2_OP_SET_RSS_HASH:
+		valid_len = sizeof(struct virtchnl2_rss_hash);
+		break;
+	case VIRTCHNL2_OP_SET_SRIOV_VFS:
+		valid_len = sizeof(struct virtchnl2_sriov_vfs_info);
+		break;
+	case VIRTCHNL2_OP_GET_PTYPE_INFO:
+		valid_len = sizeof(struct virtchnl2_get_ptype_info);
+		break;
+	case VIRTCHNL2_OP_GET_STATS:
+		valid_len = sizeof(struct virtchnl2_vport_stats);
+		break;
+	case VIRTCHNL2_OP_RESET_VF:
+		break;
+	/* These are always errors coming from the VF. */
+	case VIRTCHNL2_OP_EVENT:
+	case VIRTCHNL2_OP_UNKNOWN:
+	default:
+		return VIRTCHNL2_STATUS_ERR_PARAM;
+	}
+	/* few more checks */
+	if (err_msg_format || valid_len != msglen)
+		return VIRTCHNL2_STATUS_ERR_OPCODE_MISMATCH;
+
+	return 0;
+}
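+
+/* Illustrative sketch of how a hypothetical caller might combine the two
+ * helpers above when handling an incoming mailbox message: validate the
+ * payload length first and use virtchnl2_op_str() for readable diagnostics.
+ */
+static inline int
+virtchnl2_check_incoming_msg(u32 v_opcode, u8 *msg, u16 msglen)
+{
+	int err = virtchnl2_vc_validate_vf_msg(NULL, v_opcode, msg, msglen);
+
+	if (err)
+		/* e.g. log the failure together with virtchnl2_op_str(v_opcode) */
+		return err;
+
+	/* dispatch on v_opcode ... */
+	return 0;
+}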
+
+#endif /* _VIRTCHNL_2_H_ */
diff --git a/drivers/common/idpf/virtchnl2_lan_desc.h b/drivers/common/idpf/virtchnl2_lan_desc.h
new file mode 100644
index 0000000000..b8cb22e474
--- /dev/null
+++ b/drivers/common/idpf/virtchnl2_lan_desc.h
@@ -0,0 +1,606 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+/*
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * For licensing information, see the file 'LICENSE' in the root folder
+ */
+#ifndef _VIRTCHNL2_LAN_DESC_H_
+#define _VIRTCHNL2_LAN_DESC_H_
+
+/* VIRTCHNL2_TX_DESC_IDS
+ * Transmit descriptor ID flags
+ */
+#define VIRTCHNL2_TXDID_DATA				BIT(0)
+#define VIRTCHNL2_TXDID_CTX				BIT(1)
+#define VIRTCHNL2_TXDID_REINJECT_CTX			BIT(2)
+#define VIRTCHNL2_TXDID_FLEX_DATA			BIT(3)
+#define VIRTCHNL2_TXDID_FLEX_CTX			BIT(4)
+#define VIRTCHNL2_TXDID_FLEX_TSO_CTX			BIT(5)
+#define VIRTCHNL2_TXDID_FLEX_TSYN_L2TAG1		BIT(6)
+#define VIRTCHNL2_TXDID_FLEX_L2TAG1_L2TAG2		BIT(7)
+#define VIRTCHNL2_TXDID_FLEX_TSO_L2TAG2_PARSTAG_CTX	BIT(8)
+#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_SA_TSO_CTX	BIT(9)
+#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_SA_CTX		BIT(10)
+#define VIRTCHNL2_TXDID_FLEX_L2TAG2_CTX			BIT(11)
+#define VIRTCHNL2_TXDID_FLEX_FLOW_SCHED			BIT(12)
+#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_TSO_CTX		BIT(13)
+#define VIRTCHNL2_TXDID_FLEX_HOSTSPLIT_CTX		BIT(14)
+#define VIRTCHNL2_TXDID_DESC_DONE			BIT(15)
+
+/* VIRTCHNL2_RX_DESC_IDS
+ * Receive descriptor IDs (range from 0 to 63)
+ */
+#define VIRTCHNL2_RXDID_0_16B_BASE			0
+#define VIRTCHNL2_RXDID_1_32B_BASE			1
+/* FLEX_SQ_NIC and FLEX_SPLITQ share desc ids because they can be
+ * differentiated based on queue model; e.g. single queue model can
+ * only use FLEX_SQ_NIC and split queue model can only use FLEX_SPLITQ
+ * for DID 2.
+ */
+#define VIRTCHNL2_RXDID_2_FLEX_SPLITQ			2
+#define VIRTCHNL2_RXDID_2_FLEX_SQ_NIC			2
+#define VIRTCHNL2_RXDID_3_FLEX_SQ_SW			3
+#define VIRTCHNL2_RXDID_4_FLEX_SQ_NIC_VEB		4
+#define VIRTCHNL2_RXDID_5_FLEX_SQ_NIC_ACL		5
+#define VIRTCHNL2_RXDID_6_FLEX_SQ_NIC_2			6
+#define VIRTCHNL2_RXDID_7_HW_RSVD			7
+/* 9 through 15 are reserved */
+#define VIRTCHNL2_RXDID_16_COMMS_GENERIC		16
+#define VIRTCHNL2_RXDID_17_COMMS_AUX_VLAN		17
+#define VIRTCHNL2_RXDID_18_COMMS_AUX_IPV4		18
+#define VIRTCHNL2_RXDID_19_COMMS_AUX_IPV6		19
+#define VIRTCHNL2_RXDID_20_COMMS_AUX_FLOW		20
+#define VIRTCHNL2_RXDID_21_COMMS_AUX_TCP		21
+/* 22 through 63 are reserved */
+
+/* VIRTCHNL2_RX_DESC_ID_BITMASKS
+ * Receive descriptor ID bitmasks
+ */
+#define VIRTCHNL2_RXDID_0_16B_BASE_M		BIT(VIRTCHNL2_RXDID_0_16B_BASE)
+#define VIRTCHNL2_RXDID_1_32B_BASE_M		BIT(VIRTCHNL2_RXDID_1_32B_BASE)
+#define VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M		BIT(VIRTCHNL2_RXDID_2_FLEX_SPLITQ)
+#define VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M		BIT(VIRTCHNL2_RXDID_2_FLEX_SQ_NIC)
+#define VIRTCHNL2_RXDID_3_FLEX_SQ_SW_M		BIT(VIRTCHNL2_RXDID_3_FLEX_SQ_SW)
+#define VIRTCHNL2_RXDID_4_FLEX_SQ_NIC_VEB_M	BIT(VIRTCHNL2_RXDID_4_FLEX_SQ_NIC_VEB)
+#define VIRTCHNL2_RXDID_5_FLEX_SQ_NIC_ACL_M	BIT(VIRTCHNL2_RXDID_5_FLEX_SQ_NIC_ACL)
+#define VIRTCHNL2_RXDID_6_FLEX_SQ_NIC_2_M	BIT(VIRTCHNL2_RXDID_6_FLEX_SQ_NIC_2)
+#define VIRTCHNL2_RXDID_7_HW_RSVD_M		BIT(VIRTCHNL2_RXDID_7_HW_RSVD)
+/* 9 through 15 are reserved */
+#define VIRTCHNL2_RXDID_16_COMMS_GENERIC_M	BIT(VIRTCHNL2_RXDID_16_COMMS_GENERIC)
+#define VIRTCHNL2_RXDID_17_COMMS_AUX_VLAN_M	BIT(VIRTCHNL2_RXDID_17_COMMS_AUX_VLAN)
+#define VIRTCHNL2_RXDID_18_COMMS_AUX_IPV4_M	BIT(VIRTCHNL2_RXDID_18_COMMS_AUX_IPV4)
+#define VIRTCHNL2_RXDID_19_COMMS_AUX_IPV6_M	BIT(VIRTCHNL2_RXDID_19_COMMS_AUX_IPV6)
+#define VIRTCHNL2_RXDID_20_COMMS_AUX_FLOW_M	BIT(VIRTCHNL2_RXDID_20_COMMS_AUX_FLOW)
+#define VIRTCHNL2_RXDID_21_COMMS_AUX_TCP_M	BIT(VIRTCHNL2_RXDID_21_COMMS_AUX_TCP)
+/* 22 through 63 are reserved */
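+
+/* Illustrative sketch of a hypothetical helper: testing whether the splitq
+ * flex descriptor format is advertised in an rx_desc_ids bitmap (e.g. one
+ * negotiated through the create vport exchange) before selecting the split
+ * queue Rx path.
+ */
+static inline int
+virtchnl2_splitq_rxdid_supported(u64 rx_desc_ids)
+{
+	return (rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M) != 0;
+}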
+
+/* Rx */
+/* For splitq virtchnl2_rx_flex_desc_adv desc members */
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_S		0
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M		\
+	MAKEMASK(0xFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S		0
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M		\
+	MAKEMASK(0x3FFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_S		10
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_M		\
+	MAKEMASK(0x3UL, VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_S		12
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_M			\
+	MAKEMASK(0xFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S		0
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M	\
+	MAKEMASK(0x3FFFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S		14
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M			\
+	BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S		15
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M		\
+	BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_S		0
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M		\
+	MAKEMASK(0x3FFUL, VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S		10
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M			\
+	BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S		11
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_M			\
+	BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S		12
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_M			\
+	MAKEMASK(0x7UL, VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S		15
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_M		\
+	BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S)
+
+/* VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS_ERROR_0_QW1_BITS
+ * for splitq virtchnl2_rx_flex_desc_adv
+ * Note: These are predefined bit offsets
+ */
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_DD_S			0
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_S		1
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_S		2
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S		3
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S		4
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S		5
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S		6
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S		7
+
+/* VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS_ERROR_0_QW0_BITS
+ * for splitq virtchnl2_rx_flex_desc_adv
+ * Note: These are predefined bit offsets
+ */
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_LPBK_S		0
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_S		1
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RXE_S		2
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_CRCP_S		3
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S		4
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L2TAG1P_S		5
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD0_VALID_S	6
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD1_VALID_S	7
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_LAST			8 /* this entry must be last!!! */
+
+/* VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS_ERROR_1_BITS
+ * for splitq virtchnl2_rx_flex_desc_adv
+ * Note: These are predefined bit offsets
+ */
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_RSVD_S		0 /* 2 bits */
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_ATRAEFAIL_S		2
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_L2TAG2P_S		3
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD2_VALID_S	4
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD3_VALID_S	5
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD4_VALID_S	6
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD5_VALID_S	7
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_LAST			8 /* this entry must be last!!! */
+
+/* for singleq (flex) virtchnl2_rx_flex_desc fields */
+/* for virtchnl2_rx_flex_desc.ptype_flex_flags0 member */
+#define VIRTCHNL2_RX_FLEX_DESC_PTYPE_S			0
+#define VIRTCHNL2_RX_FLEX_DESC_PTYPE_M			\
+	MAKEMASK(0x3FFUL, VIRTCHNL2_RX_FLEX_DESC_PTYPE_S) /* 10 bits */
+
+/* for virtchnl2_rx_flex_desc.pkt_length member */
+#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_S			0
+#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M			\
+	MAKEMASK(0x3FFFUL, VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_S) /* 14 bits */
+
+/* VIRTCHNL2_RX_FLEX_DESC_STATUS_ERROR_0_BITS
+ * for singleq (flex) virtchnl2_rx_flex_desc
+ * Note: These are predefined bit offsets
+ */
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_S			0
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_EOF_S			1
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_HBO_S			2
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_S			3
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_S		4
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_S		5
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S		6
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S		7
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_LPBK_S			8
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_IPV6EXADD_S		9
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_RXE_S			10
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_CRCP_S			11
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_S		12
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_L2TAG1P_S		13
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S		14
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S		15
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS0_LAST			16 /* this entry must be last!!! */
+
+/* VIRTCHNL2_RX_FLEX_DESC_STATUS_ERROR_1_BITS
+ * for singleq (flex) virtchnl2_rx_flex_desc
+ * Note: These are predefined bit offsets
+ */
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_CPM_S			0 /* 4 bits */
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_S			4
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_CRYPTO_S			5
+/* [10:6] reserved */
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_L2TAG2P_S		11
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S		12
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S		13
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S		14
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S		15
+#define VIRTCHNL2_RX_FLEX_DESC_STATUS1_LAST			16 /* this entry must be last!!! */
+
+/* for virtchnl2_rx_flex_desc.ts_low member */
+#define VIRTCHNL2_RX_FLEX_TSTAMP_VALID				BIT(0)
+
+/* For singleq (non flex) virtchnl2_singleq_base_rx_desc legacy desc members */
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_SPH_S	63
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_SPH_M	\
+	BIT_ULL(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_SPH_S)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_S	52
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_M	\
+	MAKEMASK(0x7FFULL, VIRTCHNL2_RX_BASE_DESC_QW1_LEN_HBUF_S)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_S	38
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M	\
+	MAKEMASK(0x3FFFULL, VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_S)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_S	30
+#define VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M	\
+	MAKEMASK(0xFFULL, VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_S)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_S	19
+#define VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M	\
+	MAKEMASK(0xFFUL, VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_S)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_S	0
+#define VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M	\
+	MAKEMASK(0x7FFFFUL, VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_S)
+
+/* VIRTCHNL2_RX_BASE_DESC_STATUS_BITS
+ * for singleq (base) virtchnl2_rx_base_desc
+ * Note: These are predefined bit offsets
+ */
+#define VIRTCHNL2_RX_BASE_DESC_STATUS_DD_S		0
+#define VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_S		1
+#define VIRTCHNL2_RX_BASE_DESC_STATUS_L2TAG1P_S		2
+#define VIRTCHNL2_RX_BASE_DESC_STATUS_L3L4P_S		3
+#define VIRTCHNL2_RX_BASE_DESC_STATUS_CRCP_S		4
+#define VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD_S		5 /* 3 bits */
+#define VIRTCHNL2_RX_BASE_DESC_STATUS_EXT_UDP_0_S	8
+#define VIRTCHNL2_RX_BASE_DESC_STATUS_UMBCAST_S		9 /* 2 bits */
+#define VIRTCHNL2_RX_BASE_DESC_STATUS_FLM_S		11
+#define VIRTCHNL2_RX_BASE_DESC_STATUS_FLTSTAT_S		12 /* 2 bits */
+#define VIRTCHNL2_RX_BASE_DESC_STATUS_LPBK_S		14
+#define VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_S	15
+#define VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD1_S		16 /* 2 bits */
+#define VIRTCHNL2_RX_BASE_DESC_STATUS_INT_UDP_0_S	18
+#define VIRTCHNL2_RX_BASE_DESC_STATUS_LAST		19 /* this entry must be last!!! */
+
+/* VIRTCHNL2_RX_BASE_DESC_EXT_STATUS_BITS
+ * for singleq (base) virtchnl2_rx_base_desc
+ * Note: These are predefined bit offsets
+ */
+#define VIRTCHNL2_RX_BASE_DESC_EXT_STATUS_L2TAG2P_S	0
+
+/* VIRTCHNL2_RX_BASE_DESC_ERROR_BITS
+ * for singleq (base) virtchnl2_rx_base_desc
+ * Note: These are predefined bit offsets
+ */
+#define VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_S		0
+#define VIRTCHNL2_RX_BASE_DESC_ERROR_ATRAEFAIL_S	1
+#define VIRTCHNL2_RX_BASE_DESC_ERROR_HBO_S		2
+#define VIRTCHNL2_RX_BASE_DESC_ERROR_L3L4E_S		3 /* 3 bits */
+#define VIRTCHNL2_RX_BASE_DESC_ERROR_IPE_S		3
+#define VIRTCHNL2_RX_BASE_DESC_ERROR_L4E_S		4
+#define VIRTCHNL2_RX_BASE_DESC_ERROR_EIPE_S		5
+#define VIRTCHNL2_RX_BASE_DESC_ERROR_OVERSIZE_S		6
+#define VIRTCHNL2_RX_BASE_DESC_ERROR_PPRS_S		7
+
+/* VIRTCHNL2_RX_BASE_DESC_FLTSTAT_VALUES
+ * for singleq (base) virtchnl2_rx_base_desc
+ * Note: These are predefined bit offsets
+ */
+#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_NO_DATA		0
+#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_FD_ID		1
+#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSV		2
+#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH		3
+
+/* Receive Descriptors */
+/* splitq buf
+ * |                                       16|                   0|
+ * ----------------------------------------------------------------
+ * | RSV                                     | Buffer ID          |
+ * ----------------------------------------------------------------
+ * | Rx packet buffer address                                     |
+ * ----------------------------------------------------------------
+ * | Rx header buffer address                                     |
+ * ----------------------------------------------------------------
+ * | RSV                                                          |
+ * ----------------------------------------------------------------
+ * |                                                             0|
+ */
+struct virtchnl2_splitq_rx_buf_desc {
+	struct {
+		__le16  buf_id; /* Buffer Identifier */
+		__le16  rsvd0;
+		__le32  rsvd1;
+	} qword0;
+	__le64  pkt_addr; /* Packet buffer address */
+	__le64  hdr_addr; /* Header buffer address */
+	__le64  rsvd2;
+}; /* read used with buffer queues */
+
+/* singleq buf
+ * |                                                             0|
+ * ----------------------------------------------------------------
+ * | Rx packet buffer address                                     |
+ * ----------------------------------------------------------------
+ * | Rx header buffer address                                     |
+ * ----------------------------------------------------------------
+ * | RSV                                                          |
+ * ----------------------------------------------------------------
+ * | RSV                                                          |
+ * ----------------------------------------------------------------
+ * |                                                             0|
+ */
+struct virtchnl2_singleq_rx_buf_desc {
+	__le64  pkt_addr; /* Packet buffer address */
+	__le64  hdr_addr; /* Header buffer address */
+	__le64  rsvd1;
+	__le64  rsvd2;
+}; /* read used with buffer queues */
+
+union virtchnl2_rx_buf_desc {
+	struct virtchnl2_singleq_rx_buf_desc		read;
+	struct virtchnl2_splitq_rx_buf_desc		split_rd;
+};
+
+/* (0x00) singleq wb(compl) */
+struct virtchnl2_singleq_base_rx_desc {
+	struct {
+		struct {
+			__le16 mirroring_status;
+			__le16 l2tag1;
+		} lo_dword;
+		union {
+			__le32 rss; /* RSS Hash */
+			__le32 fd_id; /* Flow Director filter id */
+		} hi_dword;
+	} qword0;
+	struct {
+		/* status/error/PTYPE/length */
+		__le64 status_error_ptype_len;
+	} qword1;
+	struct {
+		__le16 ext_status; /* extended status */
+		__le16 rsvd;
+		__le16 l2tag2_1;
+		__le16 l2tag2_2;
+	} qword2;
+	struct {
+		__le32 reserved;
+		__le32 fd_id;
+	} qword3;
+}; /* writeback */
+
+/* (0x01) singleq flex compl */
+struct virtchnl2_rx_flex_desc {
+	/* Qword 0 */
+	u8 rxdid; /* descriptor builder profile id */
+	u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
+	__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
+	__le16 pkt_len; /* [15:14] are reserved */
+	__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
+					/* sph=[11:11] */
+					/* ff1/ext=[15:12] */
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le16 flex_meta0;
+	__le16 flex_meta1;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flex_flags2;
+	u8 time_stamp_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le16 flex_meta2;
+	__le16 flex_meta3;
+	union {
+		struct {
+			__le16 flex_meta4;
+			__le16 flex_meta5;
+		} flex;
+		__le32 ts_high;
+	} flex_ts;
+};
+
+/* (0x02) */
+struct virtchnl2_rx_flex_desc_nic {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flex_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	union {
+		struct {
+			__le16 rsvd;
+			__le16 flow_id_ipv6;
+		} flex;
+		__le32 ts_high;
+	} flex_ts;
+};
+
+/* Rx Flex Descriptor Switch Profile
+ * RxDID Profile Id 3
+ * Flex-field 0: Source Vsi
+ */
+struct virtchnl2_rx_flex_desc_sw {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flex_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le16 src_vsi; /* [10:15] are reserved */
+	__le16 flex_md1_rsvd;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flex_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 rsvd; /* flex words 2-3 are reserved */
+	__le32 ts_high;
+};
+
+
+/* Rx Flex Descriptor NIC Profile
+ * RxDID Profile Id 6
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow Id lower 16-bits
+ * Flex-field 3: Source Vsi
+ * Flex-field 4: reserved, Vlan id taken from L2Tag
+ */
+struct virtchnl2_rx_flex_desc_nic_2 {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flex_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le16 flow_id;
+	__le16 src_vsi;
+	union {
+		struct {
+			__le16 rsvd;
+			__le16 flow_id_ipv6;
+		} flex;
+		__le32 ts_high;
+	} flex_ts;
+};
+
+/* Rx Flex Descriptor Advanced (Split Queue Model)
+ * RxDID Profile Id 7
+ */
+struct virtchnl2_rx_flex_desc_adv {
+	/* Qword 0 */
+	u8 rxdid_ucast; /* profile_id=[3:0] */
+			/* rsvd=[5:4] */
+			/* ucast=[7:6] */
+	u8 status_err0_qw0;
+	__le16 ptype_err_fflags0;	/* ptype=[9:0] */
+					/* ip_hdr_err=[10:10] */
+					/* udp_len_err=[11:11] */
+					/* ff0=[15:12] */
+	__le16 pktlen_gen_bufq_id;	/* plen=[13:0] */
+					/* gen=[14:14]  only in splitq */
+					/* bufq_id=[15:15] only in splitq */
+	__le16 hdrlen_flags;		/* header=[9:0] */
+					/* rsc=[10:10] only in splitq */
+					/* sph=[11:11] only in splitq */
+					/* ext_udp_0=[12:12] */
+					/* int_udp_0=[13:13] */
+					/* trunc_mirr=[14:14] */
+					/* miss_prepend=[15:15] */
+	/* Qword 1 */
+	u8 status_err0_qw1;
+	u8 status_err1;
+	u8 fflags1;
+	u8 ts_low;
+	__le16 fmd0;
+	__le16 fmd1;
+	/* Qword 2 */
+	__le16 fmd2;
+	u8 fflags2;
+	u8 hash3;
+	__le16 fmd3;
+	__le16 fmd4;
+	/* Qword 3 */
+	__le16 fmd5;
+	__le16 fmd6;
+	__le16 fmd7_0;
+	__le16 fmd7_1;
+}; /* writeback */
+
+/* Rx Flex Descriptor Advanced (Split Queue Model) NIC Profile
+ * RxDID Profile Id 8
+ * Flex-field 0: BufferID
+ * Flex-field 1: Raw checksum/L2TAG1/RSC Seg Len (determined by HW)
+ * Flex-field 2: Hash[15:0]
+ * Flex-flags 2: Hash[23:16]
+ * Flex-field 3: L2TAG2
+ * Flex-field 5: L2TAG1
+ * Flex-field 7: Timestamp (upper 32 bits)
+ */
+struct virtchnl2_rx_flex_desc_adv_nic_3 {
+	/* Qword 0 */
+	u8 rxdid_ucast; /* profile_id=[3:0] */
+			/* rsvd=[5:4] */
+			/* ucast=[7:6] */
+	u8 status_err0_qw0;
+	__le16 ptype_err_fflags0;	/* ptype=[9:0] */
+					/* ip_hdr_err=[10:10] */
+					/* udp_len_err=[11:11] */
+					/* ff0=[15:12] */
+	__le16 pktlen_gen_bufq_id;	/* plen=[13:0] */
+					/* gen=[14:14]  only in splitq */
+					/* bufq_id=[15:15] only in splitq */
+	__le16 hdrlen_flags;		/* header=[9:0] */
+					/* rsc=[10:10] only in splitq */
+					/* sph=[11:11] only in splitq */
+					/* ext_udp_0=[12:12] */
+					/* int_udp_0=[13:13] */
+					/* trunc_mirr=[14:14] */
+					/* miss_prepend=[15:15] */
+	/* Qword 1 */
+	u8 status_err0_qw1;
+	u8 status_err1;
+	u8 fflags1;
+	u8 ts_low;
+	__le16 buf_id; /* only in splitq */
+	union {
+		__le16 raw_cs;
+		__le16 l2tag1;
+		__le16 rscseglen;
+	} misc;
+	/* Qword 2 */
+	__le16 hash1;
+	union {
+		u8 fflags2;
+		u8 mirrorid;
+		u8 hash2;
+	} ff2_mirrid_hash2;
+	u8 hash3;
+	__le16 l2tag2;
+	__le16 fmd4;
+	/* Qword 3 */
+	__le16 l2tag1;
+	__le16 fmd6;
+	__le32 ts_high;
+}; /* writeback */
+
+union virtchnl2_rx_desc {
+	struct virtchnl2_singleq_rx_buf_desc		read;
+	struct virtchnl2_singleq_base_rx_desc		base_wb;
+	struct virtchnl2_rx_flex_desc			flex_wb;
+	struct virtchnl2_rx_flex_desc_nic		flex_nic_wb;
+	struct virtchnl2_rx_flex_desc_sw		flex_sw_wb;
+	struct virtchnl2_rx_flex_desc_nic_2		flex_nic_2_wb;
+	struct virtchnl2_rx_flex_desc_adv		flex_adv_wb;
+	struct virtchnl2_rx_flex_desc_adv_nic_3		flex_adv_nic_3_wb;
+};
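+
+/* Illustrative sketch of a hypothetical helper: extracting the generation
+ * bit, buffer queue id and packet length from a split queue advanced
+ * writeback descriptor with the VIRTCHNL2_RX_FLEX_DESC_ADV_* masks above.
+ * LE16_TO_CPU is assumed to come from idpf_osdep.h.
+ */
+static inline void
+virtchnl2_parse_splitq_pktlen(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd,
+			      u16 *gen, u16 *bufq_id, u16 *pkt_len)
+{
+	u16 field = LE16_TO_CPU(rxd->pktlen_gen_bufq_id);
+
+	*gen = (field & VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M) >>
+	       VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S;
+	*bufq_id = (field & VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M) >>
+		   VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S;
+	*pkt_len = (field & VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M) >>
+		   VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_S;
+}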
+
+#endif /* _VIRTCHNL2_LAN_DESC_H_ */
diff --git a/drivers/common/idpf/virtchnl_inline_ipsec.h b/drivers/common/idpf/virtchnl_inline_ipsec.h
new file mode 100644
index 0000000000..e19043ac47
--- /dev/null
+++ b/drivers/common/idpf/virtchnl_inline_ipsec.h
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM	3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM		16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM		128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER	2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN		128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM	8
+#define VIRTCHNL_IPSEC_SA_DESTROY		0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID		0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID		0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP	0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP	0xFFFFFFFF
+
+/* crypto type */
+#define VIRTCHNL_AUTH		1
+#define VIRTCHNL_CIPHER		2
+#define VIRTCHNL_AEAD		3
+
+/* caps enabled */
+#define VIRTCHNL_IPSEC_ESN_ENA			BIT(0)
+#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA		BIT(1)
+#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA		BIT(2)
+#define VIRTCHNL_IPSEC_AUDIT_ENA		BIT(3)
+#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA		BIT(4)
+#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA	BIT(5)
+#define VIRTCHNL_IPSEC_ARW_CHECK_ENA		BIT(6)
+#define VIRTCHNL_IPSEC_24BIT_SPI_ENA		BIT(7)
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_HASH_NO_ALG	0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC	1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC	2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC	3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC	4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC	5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC	6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC	7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC	8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC	9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC	10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC	11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC	12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC	13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC	14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_CIPHER_NO_ALG	15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC	16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC	17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR	18 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM	19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM	20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP	1
+#define VIRTCHNL_PROTO_AH	2
+#define VIRTCHNL_PROTO_RSVD1	3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT	1
+#define VIRTCHNL_SA_MODE_TUNNEL		2
+#define VIRTCHNL_SA_MODE_TRAN_TUN	3
+#define VIRTCHNL_SA_MODE_UNKNOWN	4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS		1
+#define VIRTCHNL_DIR_EGRESS		2
+#define VIRTCHNL_DIR_INGRESS_EGRESS	3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE	1
+#define VIRTCHNL_TERM_HARDWARE	2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4	1
+#define VIRTCHNL_IPV6	2
+
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+	INLINE_IPSEC_SUCCESS = 0,
+	INLINE_IPSEC_FAIL = -1,
+	INLINE_IPSEC_ERR_FIFO_FULL = -2,
+	INLINE_IPSEC_ERR_NOT_READY = -3,
+	INLINE_IPSEC_ERR_VF_DOWN = -4,
+	INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+	INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+	INLINE_IPSEC_OP_GET_CAP = 0,
+	INLINE_IPSEC_OP_GET_STATUS = 1,
+	INLINE_IPSEC_OP_SA_CREATE = 2,
+	INLINE_IPSEC_OP_SA_UPDATE = 3,
+	INLINE_IPSEC_OP_SA_DESTROY = 4,
+	INLINE_IPSEC_OP_SP_CREATE = 5,
+	INLINE_IPSEC_OP_SP_DESTROY = 6,
+	INLINE_IPSEC_OP_SA_READ = 7,
+	INLINE_IPSEC_OP_EVENT = 8,
+	INLINE_IPSEC_OP_RESP = 9,
+};
+
+#pragma pack(1)
+/* Not all fields are valid; if a field is invalid, set all of its bits to 1 */
+struct virtchnl_algo_cap  {
+	u32 algo_type;
+
+	u16 block_size;
+
+	u16 min_key_size;
+	u16 max_key_size;
+	u16 inc_key_size;
+
+	u16 min_iv_size;
+	u16 max_iv_size;
+	u16 inc_iv_size;
+
+	u16 min_digest_size;
+	u16 max_digest_size;
+	u16 inc_digest_size;
+
+	u16 min_aad_size;
+	u16 max_aad_size;
+	u16 inc_aad_size;
+};
+#pragma pack()
+
+/* VF records the crypto capability reported by PF over virtchnl */
+struct virtchnl_sym_crypto_cap {
+	u8 crypto_type;
+	u8 algo_cap_num;
+	struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+};
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF passes virtchnl_ipsec_cap to PF
+ * and PF returns the IPsec capability over virtchnl.
+ */
+#pragma pack(1)
+struct virtchnl_ipsec_cap {
+	/* max number of SA per VF */
+	u16 max_sa_num;
+
+	/* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+	u8 virtchnl_protocol_type;
+
+	/* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+	u8 virtchnl_sa_mode;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 termination_mode;
+
+	/* number of supported crypto capability */
+	u8 crypto_cap_num;
+
+	/* descriptor ID */
+	u16 desc_id;
+
+	/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
+	u32 caps_enabled;
+
+	/* crypto capabilities */
+	struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+};
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+	u8 crypto_type;
+
+	u32 algo_type;
+
+	/* Length of valid IV data. */
+	u16 iv_len;
+
+	/* Length of digest */
+	u16 digest_len;
+
+	/* SA salt */
+	u32 salt;
+
+	/* The length of the symmetric key */
+	u16 key_len;
+
+	/* key data buffer */
+	u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+};
+#pragma pack()
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+	struct virtchnl_ipsec_crypto_cfg_item
+		items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+#pragma pack(1)
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF sends this SA configuration to PF using virtchnl;
+ * PF creates the SA according to the configuration and the PF driver
+ * returns a unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* type of outer IP - IPv4/IPv6 */
+	u8 virtchnl_ip_type;
+
+	/* type of esn - !0:enable/0:disable */
+	u8 esn_enabled;
+
+	/* udp encap - !0:enable/0:disable */
+	u8 udp_encap_enabled;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* outer src ip address */
+	u8 src_addr[16];
+
+	/* outer dst ip address */
+	u8 dst_addr[16];
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* When enabled, sa_index must be valid */
+	u8 sa_index_en;
+
+	/* SA index when sa_index_en is true */
+	u32 sa_index;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When enabled, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When enabled, arw_size must be valid.
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* no ip offload mode - enable/disable
+	 * When enabled, ip type and address must not be valid.
+	 */
+	u8 no_ip_offload_en;
+
+	/* SA Domain. Used to logically separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* crypto configuration */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+};
+#pragma pack()
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF sends the SA index and its updated configuration to PF;
+ * PF updates the SA according to the configuration.
+ */
+struct virtchnl_ipsec_sa_update {
+	u32 sa_index; /* SA to update */
+	u32 esn_hi; /* high 32 bits of esn */
+	u32 esn_low; /* low 32 bits of esn */
+};
+
+#pragma pack(1)
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF sends the SA index configuration to PF;
+ * PF destroys the SA according to the configuration.
+ * The flag bitmap indicates whether all SAs or just the selected SAs
+ * will be destroyed.
+ */
+struct virtchnl_ipsec_sa_destroy {
+	/* An all-zero bitmap indicates all SAs will be destroyed.
+	 * A non-zero bitmap indicates the selected SAs in the
+	 * sa_index array will be destroyed.
+	 */
+	u8 flag;
+
+	/* selected SA index */
+	u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF sends this SA configuration to PF using virtchnl;
+ * PF reads the SA and returns the configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+	/* SA valid - invalid/valid */
+	u8 valid;
+
+	/* SA active - inactive/active */
+	u8 active;
+
+	/* SA SN rollover - not_rollover/rollover */
+	u8 sn_rollover;
+
+	/* IPsec SA Protocol - AH/ESP */
+	u8 virtchnl_protocol_type;
+
+	/* termination mode - value ref VIRTCHNL_TERM_XXX */
+	u8 virtchnl_termination;
+
+	/* auditing mode - enable/disable */
+	u8 audit_en;
+
+	/* lifetime byte limit - enable/disable
+	 * When set to limit, byte_limit_hard and byte_limit_soft
+	 * must be valid.
+	 */
+	u8 byte_limit_en;
+
+	/* hard byte limit count */
+	u64 byte_limit_hard;
+
+	/* soft byte limit count */
+	u64 byte_limit_soft;
+
+	/* drop on authentication failure - enable/disable */
+	u8 drop_on_auth_fail_en;
+
+	/* anti-replay window check - enable/disable
+	 * When set to check, arw_size, arw_top, and arw must be valid
+	 */
+	u8 arw_check_en;
+
+	/* size of arw window, offset by 1. Setting to 0
+	 * represents ARW window size of 1. Setting to 127
+	 * represents ARW window size of 128
+	 */
+	u8 arw_size;
+
+	/* reserved */
+	u8 reserved1;
+
+	/* top of anti-replay-window */
+	u64 arw_top;
+
+	/* anti-replay-window */
+	u8 arw[16];
+
+	/* packets processed  */
+	u64 packets_processed;
+
+	/* bytes processed  */
+	u64 bytes_processed;
+
+	/* packets dropped  */
+	u32 packets_dropped;
+
+	/* authentication failures */
+	u32 auth_fails;
+
+	/* ARW check failures */
+	u32 arw_fails;
+
+	/* type of esn - enable/disable */
+	u8 esn;
+
+	/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+	u8 virtchnl_direction;
+
+	/* SA security parameter index */
+	u32 spi;
+
+	/* SA salt */
+	u32 salt;
+
+	/* high 32 bits of esn */
+	u32 esn_hi;
+
+	/* low 32 bits of esn */
+	u32 esn_low;
+
+	/* SA Domain. Used to logically separate an SADB into groups.
+	 * PF drivers supporting a single group ignore this field.
+	 */
+	u16 sa_domain;
+
+	/* SPD reference. Used to link an SA with its policy.
+	 * PF drivers may ignore this field.
+	 */
+	u16 spd_ref;
+
+	/* crypto configuration. Salt and keys are set to 0 */
+	struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+};
+#pragma pack()
+
+/* Add allowlist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+	u32 spi;
+	u32 dip[4];
+
+	/* Drop frame if true or redirect to QAT if false. */
+	u8 drop;
+
+	/* Congestion domain. For future use. */
+	u8 cgd;
+
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+
+	/* Set TC (congestion domain) if true. For future use. */
+	u8 set_tc;
+
+	/* 0 for NAT-T unsupported, 1 for NAT-T supported */
+	u8 is_udp;
+
+	/* reserved */
+	u8 reserved;
+
+	/* NAT-T UDP port number. Only valid in case NAT-T supported */
+	u16 udp_port;
+};
+
+#pragma pack(1)
+/* Delete allowlist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+	/* 0 for IPv4 table, 1 for IPv6 table. */
+	u8 table_id;
+	u32 rule_id;
+};
+#pragma pack()
+
+/* Response from IES to allowlist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+	u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+	u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET	0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON	0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF	0x4
+
+struct virtchnl_ipsec_event {
+	u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE	0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE	0x2
+
+struct virtchnl_ipsec_status {
+	u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+	u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+	u16 ipsec_opcode;
+	u16 req_id;
+
+	union {
+		/* IPsec request */
+		struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+		struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+		struct virtchnl_ipsec_sa_update sa_update[0];
+		struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+		struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+		/* IPsec response */
+		struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+		struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+		struct virtchnl_ipsec_cap ipsec_cap[0];
+		struct virtchnl_ipsec_status ipsec_status[0];
+		/* response to del_sa, del_sp, update_sa */
+		struct virtchnl_ipsec_resp ipsec_resp[0];
+
+		/* IPsec event (no req_id is required) */
+		struct virtchnl_ipsec_event event[0];
+
+		/* Reserved */
+		struct virtchnl_ipsec_sa_read sa_read[0];
+	} ipsec_data;
+};
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+	u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+	switch (opcode) {
+	case INLINE_IPSEC_OP_GET_CAP:
+	case INLINE_IPSEC_OP_GET_STATUS:
+		break;
+	case INLINE_IPSEC_OP_SA_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+		break;
+	case INLINE_IPSEC_OP_SP_CREATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+		break;
+	case INLINE_IPSEC_OP_SA_UPDATE:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+		break;
+	case INLINE_IPSEC_OP_SA_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+		break;
+	case INLINE_IPSEC_OP_SP_DESTROY:
+		valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+		break;
+	/* Only for msg length calculation of response to VF in case of
+	 * inline ipsec failure.
+	 */
+	case INLINE_IPSEC_OP_RESP:
+		valid_len += sizeof(struct virtchnl_ipsec_resp);
+		break;
+	default:
+		valid_len = 0;
+		break;
+	}
+
+	return valid_len;
+}
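+
+/* Illustrative sketch of a hypothetical caller: checking a received inline
+ * IPsec message against the expected length for its opcode before it is
+ * dispatched to the opcode specific handler.
+ */
+static inline int
+virtchnl_inline_ipsec_check_msg(const struct inline_ipsec_msg *imsg, u16 msglen)
+{
+	u16 expect = virtchnl_inline_ipsec_val_msg_len(imsg->ipsec_opcode);
+
+	if (expect == 0 || msglen < expect)
+		return INLINE_IPSEC_ERR_INVALID_PARAMS;
+
+	return INLINE_IPSEC_SUCCESS;
+}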
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
diff --git a/drivers/common/meson.build b/drivers/common/meson.build
index ea261dd70a..b63d899d50 100644
--- a/drivers/common/meson.build
+++ b/drivers/common/meson.build
@@ -6,6 +6,7 @@ drivers = [
         'cpt',
         'dpaax',
         'iavf',
+        'idpf',
         'mvep',
         'octeontx',
 ]
-- 
2.34.1


Thread overview: 376+ messages
2022-08-03 11:30 [PATCH 00/13] add support for idpf PMD in DPDK Junfeng Guo
2022-08-03 11:30 ` [PATCH 01/13] net/idpf/base: introduce base code Junfeng Guo
2022-08-03 11:30 ` [PATCH 02/13] net/idpf/base: add logs and OS specific implementation Junfeng Guo
2022-08-03 11:30 ` [PATCH 03/13] net/idpf: support device initialization Junfeng Guo
2022-08-03 15:11   ` Stephen Hemminger
2022-08-08  4:43     ` Guo, Junfeng
2022-10-31 18:00   ` Ali Alnubani
2022-11-01  6:55     ` Xing, Beilei
2022-11-02 15:31   ` Raslan Darawsheh
2022-11-02 15:52     ` Thomas Monjalon
2022-11-03  0:56     ` Xing, Beilei
2022-08-03 11:30 ` [PATCH 04/13] net/idpf: add queue operations Junfeng Guo
2022-08-03 15:16   ` Stephen Hemminger
2022-08-08  4:44     ` Guo, Junfeng
2022-08-03 11:30 ` [PATCH 05/13] net/idpf: add support to get device information Junfeng Guo
2022-08-03 11:30 ` [PATCH 06/13] net/idpf: add support to get packet type Junfeng Guo
2022-08-03 11:30 ` [PATCH 07/13] net/idpf: add support to update link status Junfeng Guo
2022-08-03 11:30 ` [PATCH 08/13] net/idpf: add basic Rx/Tx datapath Junfeng Guo
2022-08-03 11:31 ` [PATCH 09/13] net/idpf: add support for RSS Junfeng Guo
2022-08-03 11:31 ` [PATCH 10/13] net/idpf: add mtu configuration Junfeng Guo
2022-08-03 11:31 ` [PATCH 11/13] net/idpf: add hw statistics Junfeng Guo
2022-08-03 11:31 ` [PATCH 12/13] net/idpf: support write back based on ITR expire Junfeng Guo
2022-08-03 11:31 ` [PATCH 13/13] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-09-05 10:58 ` [PATCH v2 00/14] add support for idpf PMD in DPDK Junfeng Guo
2022-09-05 10:58   ` [PATCH v2 01/14] net/idpf/base: introduce base code Junfeng Guo
2022-10-03 13:20     ` Andrew Rybchenko
2022-10-14  9:18       ` Guo, Junfeng
2022-09-05 10:58   ` [PATCH v2 02/14] net/idpf/base: add logs and OS specific implementation Junfeng Guo
2022-10-03 13:20     ` Andrew Rybchenko
2022-10-14  9:18       ` Guo, Junfeng
2022-10-12  8:07     ` Wu, Wenjun1
2022-09-05 10:58   ` [PATCH v2 03/14] net/idpf: add support for device initialization Junfeng Guo
2022-09-21  5:41     ` Xing, Beilei
2022-09-21  6:04     ` Xing, Beilei
2022-10-03 13:44     ` Andrew Rybchenko
2022-10-14  9:18       ` Guo, Junfeng
2022-10-10  7:48     ` Wu, Wenjun1
2022-09-05 10:58   ` [PATCH v2 04/14] net/idpf: add support for queue operations Junfeng Guo
2022-10-03 13:47     ` Andrew Rybchenko
2022-10-14  9:18       ` Guo, Junfeng
2022-09-05 10:58   ` [PATCH v2 05/14] net/idpf: add support for device information get Junfeng Guo
2022-10-03 13:53     ` Andrew Rybchenko
2022-09-05 10:58   ` [PATCH v2 06/14] net/idpf: add support for packet type get Junfeng Guo
2022-10-03 13:58     ` Andrew Rybchenko
2022-10-14  9:18       ` Guo, Junfeng
2022-09-05 10:58   ` [PATCH v2 07/14] net/idpf: add support for link status update Junfeng Guo
2022-09-05 10:58   ` [PATCH v2 08/14] net/idpf: add support for basic Rx/Tx datapath Junfeng Guo
2022-10-03 14:02     ` Andrew Rybchenko
2022-10-14  9:18       ` Guo, Junfeng
2022-09-05 10:58   ` [PATCH v2 09/14] net/idpf: add support for RSS Junfeng Guo
2022-10-03 14:10     ` Andrew Rybchenko
2022-09-05 10:58   ` [PATCH v2 10/14] net/idpf: add support for mtu configuration Junfeng Guo
2022-10-03 14:12     ` Andrew Rybchenko
2022-10-14  9:18       ` Guo, Junfeng
2022-09-05 10:58   ` [PATCH v2 11/14] net/idpf: add support for hw statistics Junfeng Guo
2022-09-05 10:58   ` [PATCH v2 12/14] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-09-05 10:58   ` [PATCH v2 13/14] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-03 14:20     ` Andrew Rybchenko
2022-10-14  9:19       ` Guo, Junfeng
2022-10-10  8:06     ` Wu, Wenjun1
2022-09-05 10:58   ` [PATCH v2 14/14] net/idpf: add support for timestamp offload Junfeng Guo
2022-10-03 14:22     ` Andrew Rybchenko
2022-10-14  9:19       ` Guo, Junfeng
2022-10-10  7:56     ` Wu, Wenjun1
2022-10-03 13:31   ` [PATCH v2 00/14] add support for idpf PMD in DPDK Andrew Rybchenko
2022-10-03 14:36     ` Andrew Rybchenko
2022-10-18 11:09       ` Guo, Junfeng
2022-10-18 11:12   ` [PATCH v3 00/15] " Junfeng Guo
2022-10-18 11:12     ` [PATCH v3 01/15] common/idpf: introduce common library Junfeng Guo
2022-10-19 10:37       ` [PATCH v4 00/14] add support for idpf PMD in DPDK Junfeng Guo
2022-10-19 10:37         ` [PATCH v4 01/14] common/idpf: introduce common library Junfeng Guo
2022-10-19 11:03           ` [PATCH v5 00/14] add support for idpf PMD in DPDK Junfeng Guo
2022-10-19 11:03             ` [PATCH v5 01/14] common/idpf: introduce common library Junfeng Guo
2022-10-19 14:54               ` [PATCH v6 00/14] add support for idpf PMD in DPDK Junfeng Guo
2022-10-19 14:54                 ` [PATCH v6 01/14] common/idpf: introduce common library Junfeng Guo
2022-10-20  2:41                   ` [PATCH v7 00/14] add support for idpf PMD in DPDK Junfeng Guo
2022-10-20  2:41                     ` [PATCH v7 01/14] common/idpf: introduce common library Junfeng Guo
2022-10-20  2:41                     ` [PATCH v7 02/14] net/idpf: add support for device initialization Junfeng Guo
2022-10-20  2:41                     ` [PATCH v7 03/14] net/idpf: add queue setup and release in single queue model Junfeng Guo
2022-10-20  2:41                     ` [PATCH v7 04/14] net/idpf: add queue setup and release in split " Junfeng Guo
2022-10-20  2:41                     ` [PATCH v7 05/14] net/idpf: add support for queue start and stop Junfeng Guo
2022-10-20  2:41                     ` [PATCH v7 06/14] net/idpf: add support for device information get Junfeng Guo
2022-10-20  2:41                     ` [PATCH v7 07/14] net/idpf: add support for packet type get Junfeng Guo
2022-10-20  2:41                     ` [PATCH v7 08/14] net/idpf: add support for basic Rx/Tx datapath Junfeng Guo
2022-10-20  2:41                     ` [PATCH v7 09/14] net/idpf: add support for Rx/Tx offloading Junfeng Guo
2022-10-20  2:41                     ` [PATCH v7 10/14] net/idpf: add support for RSS Junfeng Guo
2022-10-20  2:41                     ` [PATCH v7 11/14] net/idpf: add support for MTU configuration Junfeng Guo
2022-10-20  2:41                     ` [PATCH v7 12/14] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-10-20  2:41                     ` [PATCH v7 13/14] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-20  2:41                     ` [PATCH v7 14/14] net/idpf: add support for timestamp offload Junfeng Guo
2022-10-20  6:29                       ` [PATCH v8 00/14] add support for idpf PMD in DPDK Junfeng Guo
2022-10-20  6:29                         ` Junfeng Guo [this message]
2022-10-21  5:18                           ` [PATCH v9 " Junfeng Guo
2022-10-21  5:18                             ` [PATCH v9 01/14] common/idpf: introduce common library Junfeng Guo
2022-10-21  6:40                               ` Andrew Rybchenko
2022-10-21 12:35                                 ` Xing, Beilei
2022-10-21 12:38                                   ` Andrew Rybchenko
2022-10-21 12:46                                   ` Zhang, Qi Z
2022-10-24 13:01                               ` [PATCH v10 00/14] add support for idpf PMD in DPDK Junfeng Guo
2022-10-24 13:01                                 ` [PATCH v10 01/14] net/idpf: add support for device start and stop Junfeng Guo
2022-10-24 13:12                                   ` [PATCH v11 00/18] add support for idpf PMD in DPDK Junfeng Guo
2022-10-24 13:12                                     ` [PATCH v11 01/18] common/idpf: introduce common library Junfeng Guo
2022-10-26 10:10                                       ` [PATCH v12 00/18] add support for idpf PMD in DPDK Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 01/18] common/idpf: introduce common library Junfeng Guo
2022-10-27  5:44                                           ` [PATCH v13 00/18] add support for idpf PMD in DPDK Junfeng Guo
2022-10-27  5:44                                             ` [PATCH v13 01/18] common/idpf: introduce common library Junfeng Guo
2022-10-27  7:47                                               ` [PATCH v14 00/18] add support for idpf PMD in DPDK Junfeng Guo
2022-10-27  7:47                                                 ` [PATCH v14 01/18] common/idpf: introduce common library Junfeng Guo
2022-10-27  7:47                                                 ` [PATCH v14 02/18] net/idpf: add support for device initialization Junfeng Guo
2022-10-28 15:35                                                   ` Andrew Rybchenko
2022-10-28 17:22                                                     ` Xing, Beilei
2022-10-27  7:47                                                 ` [PATCH v14 03/18] net/idpf: add Tx queue setup Junfeng Guo
2022-10-27  7:47                                                 ` [PATCH v14 04/18] net/idpf: add Rx " Junfeng Guo
2022-10-27  7:47                                                 ` [PATCH v14 05/18] net/idpf: add support for device start and stop Junfeng Guo
2022-10-28 15:45                                                   ` Andrew Rybchenko
2022-10-27  7:47                                                 ` [PATCH v14 06/18] net/idpf: add support for queue start Junfeng Guo
2022-10-28 15:50                                                   ` Andrew Rybchenko
2022-10-28 17:34                                                     ` Xing, Beilei
2022-10-27  7:47                                                 ` [PATCH v14 07/18] net/idpf: add support for queue stop Junfeng Guo
2022-10-27  7:47                                                 ` [PATCH v14 08/18] net/idpf: add queue release Junfeng Guo
2022-10-27  7:47                                                 ` [PATCH v14 09/18] net/idpf: add support for MTU configuration Junfeng Guo
2022-10-27  7:47                                                 ` [PATCH v14 10/18] net/idpf: add support for basic Rx datapath Junfeng Guo
2022-10-27  7:47                                                 ` [PATCH v14 11/18] net/idpf: add support for basic Tx datapath Junfeng Guo
2022-10-27  7:47                                                 ` [PATCH v14 12/18] net/idpf: support parsing packet type Junfeng Guo
2022-10-27  7:47                                                 ` [PATCH v14 13/18] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-10-27  7:47                                                 ` [PATCH v14 14/18] net/idpf: add support for RSS Junfeng Guo
2022-10-27  7:47                                                 ` [PATCH v14 15/18] net/idpf: add support for Rx offloading Junfeng Guo
2022-10-27  7:47                                                 ` [PATCH v14 16/18] net/idpf: add support for Tx offloading Junfeng Guo
2022-10-27  7:47                                                 ` [PATCH v14 17/18] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-27  7:47                                                 ` [PATCH v14 18/18] net/idpf: add support for timestamp offload Junfeng Guo
2022-10-29  3:27                                                 ` [PATCH v15 00/18] add support for idpf PMD in DPDK beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 01/18] common/idpf: introduce common library beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 02/18] net/idpf: add support for device initialization beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 03/18] net/idpf: add Tx queue setup beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 04/18] net/idpf: add Rx " beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 05/18] net/idpf: add support for device start and stop beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 06/18] net/idpf: add support for queue start beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 07/18] net/idpf: add support for queue stop beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 08/18] net/idpf: add queue release beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 09/18] net/idpf: add support for MTU configuration beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 10/18] net/idpf: add support for basic Rx datapath beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 11/18] net/idpf: add support for basic Tx datapath beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 12/18] net/idpf: support parsing packet type beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 13/18] net/idpf: add support for write back based on ITR expire beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 14/18] net/idpf: add support for RSS beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 15/18] net/idpf: add support for Rx offloading beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 16/18] net/idpf: add support for Tx offloading beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 17/18] net/idpf: add AVX512 data path for single queue model beilei.xing
2022-10-29  3:27                                                   ` [PATCH v15 18/18] net/idpf: add support for timestamp offload beilei.xing
2022-10-29 14:48                                                   ` [PATCH v15 00/18] add support for idpf PMD in DPDK Andrew Rybchenko
2022-10-31  2:26                                                     ` Xing, Beilei
2022-10-31  3:36                                                   ` [PATCH v16 " beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 01/18] common/idpf: introduce common library beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 02/18] net/idpf: add support for device initialization beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 03/18] net/idpf: add Tx queue setup beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 04/18] net/idpf: add Rx " beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 05/18] net/idpf: add support for device start and stop beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 06/18] net/idpf: add support for queue start beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 07/18] net/idpf: add support for queue stop beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 08/18] net/idpf: add queue release beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 09/18] net/idpf: add support for MTU configuration beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 10/18] net/idpf: add support for basic Rx datapath beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 11/18] net/idpf: add support for basic Tx datapath beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 12/18] net/idpf: support parsing packet type beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 13/18] net/idpf: add support for write back based on ITR expire beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 14/18] net/idpf: add support for RSS beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 15/18] net/idpf: add support for Rx offloading beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 16/18] net/idpf: add support for Tx offloading beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 17/18] net/idpf: add AVX512 data path for single queue model beilei.xing
2022-10-31  3:36                                                     ` [PATCH v16 18/18] net/idpf: add support for timestamp offload beilei.xing
2022-10-31  5:15                                                     ` [PATCH v17 00/18] add support for idpf PMD in DPDK beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 01/18] common/idpf: introduce common library beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 02/18] net/idpf: add support for device initialization beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 03/18] net/idpf: add Tx queue setup beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 04/18] net/idpf: add Rx " beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 05/18] net/idpf: add support for device start and stop beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 06/18] net/idpf: add support for queue start beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 07/18] net/idpf: add support for queue stop beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 08/18] net/idpf: add queue release beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 09/18] net/idpf: add support for MTU configuration beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 10/18] net/idpf: add support for basic Rx datapath beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 11/18] net/idpf: add support for basic Tx datapath beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 12/18] net/idpf: support parsing packet type beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 13/18] net/idpf: add support for write back based on ITR expire beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 14/18] net/idpf: add support for RSS beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 15/18] net/idpf: add support for Rx offloading beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 16/18] net/idpf: add support for Tx offloading beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 17/18] net/idpf: add AVX512 data path for single queue model beilei.xing
2022-10-31  5:15                                                       ` [PATCH v17 18/18] net/idpf: add support for timestamp offload beilei.xing
2022-10-31  8:33                                                       ` [PATCH v18 00/18] add support for idpf PMD in DPDK beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 01/18] common/idpf: introduce common library beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 02/18] net/idpf: add support for device initialization beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 03/18] net/idpf: add Tx queue setup beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 04/18] net/idpf: add Rx " beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 05/18] net/idpf: add support for device start and stop beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 06/18] net/idpf: add support for queue start beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 07/18] net/idpf: add support for queue stop beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 08/18] net/idpf: add queue release beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 09/18] net/idpf: add support for MTU configuration beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 10/18] net/idpf: add support for basic Rx datapath beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 11/18] net/idpf: add support for basic Tx datapath beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 12/18] net/idpf: support parsing packet type beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 13/18] net/idpf: add support for write back based on ITR expire beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 14/18] net/idpf: add support for RSS beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 15/18] net/idpf: add support for Rx offloading beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 16/18] net/idpf: add support for Tx offloading beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 17/18] net/idpf: add AVX512 data path for single queue model beilei.xing
2022-10-31  8:33                                                         ` [PATCH v18 18/18] net/idpf: add support for timestamp offload beilei.xing
2022-10-31 13:38                                                         ` [PATCH v18 00/18] add support for idpf PMD in DPDK Thomas Monjalon
2022-10-27  5:44                                             ` [PATCH v13 02/18] net/idpf: add support for device initialization Junfeng Guo
2022-10-27  5:44                                             ` [PATCH v13 03/18] net/idpf: add Tx queue setup Junfeng Guo
2022-10-27  5:44                                             ` [PATCH v13 04/18] net/idpf: add Rx " Junfeng Guo
2022-10-27  5:44                                             ` [PATCH v13 05/18] net/idpf: add support for device start and stop Junfeng Guo
2022-10-27  5:44                                             ` [PATCH v13 06/18] net/idpf: add support for queue start Junfeng Guo
2022-10-27  5:44                                             ` [PATCH v13 07/18] net/idpf: add support for queue stop Junfeng Guo
2022-10-27  5:44                                             ` [PATCH v13 08/18] net/idpf: add queue release Junfeng Guo
2022-10-27  5:44                                             ` [PATCH v13 09/18] net/idpf: add support for MTU configuration Junfeng Guo
2022-10-27  5:44                                             ` [PATCH v13 10/18] net/idpf: add support for basic Rx datapath Junfeng Guo
2022-10-27  5:44                                             ` [PATCH v13 11/18] net/idpf: add support for basic Tx datapath Junfeng Guo
2022-10-27  5:44                                             ` [PATCH v13 12/18] net/idpf: support parsing packet type Junfeng Guo
2022-10-27  5:45                                             ` [PATCH v13 13/18] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-10-27  5:45                                             ` [PATCH v13 14/18] net/idpf: add support for RSS Junfeng Guo
2022-10-27  5:45                                             ` [PATCH v13 15/18] net/idpf: add support for Rx offloading Junfeng Guo
2022-10-27  5:45                                             ` [PATCH v13 16/18] net/idpf: add support for Tx offloading Junfeng Guo
2022-10-27  5:45                                             ` [PATCH v13 17/18] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-27  5:45                                             ` [PATCH v13 18/18] net/idpf: add support for timestamp offload Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 02/18] net/idpf: add support for device initialization Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 03/18] net/idpf: add Tx queue setup Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 04/18] net/idpf: add Rx " Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 05/18] net/idpf: add support for device start and stop Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 06/18] net/idpf: add support for queue start Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 07/18] net/idpf: add support for queue stop Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 08/18] net/idpf: add queue release Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 09/18] net/idpf: add support for MTU configuration Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 10/18] net/idpf: add support for basic Rx datapath Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 11/18] net/idpf: add support for basic Tx datapath Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 12/18] net/idpf: support packet type get Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 13/18] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 14/18] net/idpf: add support for RSS Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 15/18] net/idpf: add support for Rx offloading Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 16/18] net/idpf: add support for Tx offloading Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 17/18] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-26 10:10                                         ` [PATCH v12 18/18] net/idpf: add support for timestamp offload Junfeng Guo
2022-10-24 13:12                                     ` [PATCH v11 02/18] net/idpf: add support for device initialization Junfeng Guo
2022-10-25  8:57                                       ` Andrew Rybchenko
2022-10-26  8:28                                         ` Xing, Beilei
2022-10-28 15:14                                         ` Andrew Rybchenko
2022-10-28 17:19                                           ` Xing, Beilei
2022-10-24 13:12                                     ` [PATCH v11 03/18] net/idpf: add Tx queue setup Junfeng Guo
2022-10-25  9:40                                       ` Andrew Rybchenko
2022-10-26  8:34                                         ` Xing, Beilei
2022-10-24 13:12                                     ` [PATCH v11 04/18] net/idpf: add Rx " Junfeng Guo
2022-10-24 13:12                                     ` [PATCH v11 05/18] net/idpf: add support for device start and stop Junfeng Guo
2022-10-25  9:49                                       ` Andrew Rybchenko
2022-10-26  8:38                                         ` Xing, Beilei
2022-10-24 13:12                                     ` [PATCH v11 06/18] net/idpf: add support for queue start Junfeng Guo
2022-10-24 13:12                                     ` [PATCH v11 07/18] net/idpf: add support for queue stop Junfeng Guo
2022-10-24 13:12                                     ` [PATCH v11 08/18] net/idpf: add queue release Junfeng Guo
2022-10-24 13:12                                     ` [PATCH v11 09/18] net/idpf: add support for packet type get Junfeng Guo
2022-10-25  9:57                                       ` Andrew Rybchenko
2022-10-24 13:12                                     ` [PATCH v11 10/18] net/idpf: add support for MTU configuration Junfeng Guo
2022-10-24 13:12                                     ` [PATCH v11 11/18] net/idpf: add support for basic Rx datapath Junfeng Guo
2022-10-24 13:12                                     ` [PATCH v11 12/18] net/idpf: add support for basic Tx datapath Junfeng Guo
2022-10-25 10:12                                       ` Andrew Rybchenko
2022-10-24 13:12                                     ` [PATCH v11 13/18] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-10-24 13:12                                     ` [PATCH v11 14/18] net/idpf: add support for RSS Junfeng Guo
2022-10-24 13:12                                     ` [PATCH v11 15/18] net/idpf: add support for Rx offloading Junfeng Guo
2022-10-25 10:03                                       ` Andrew Rybchenko
2022-10-28  1:48                                         ` Xing, Beilei
2022-10-24 13:12                                     ` [PATCH v11 16/18] net/idpf: add support for Tx offloading Junfeng Guo
2022-10-25 10:14                                       ` Andrew Rybchenko
2022-10-24 13:12                                     ` [PATCH v11 17/18] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-24 13:12                                     ` [PATCH v11 18/18] net/idpf: add support for timestamp offload Junfeng Guo
2022-10-24 13:01                                 ` [PATCH v10 02/14] net/idpf: add support for queue start Junfeng Guo
2022-10-24 13:01                                 ` [PATCH v10 03/14] net/idpf: add support for queue stop Junfeng Guo
2022-10-24 13:01                                 ` [PATCH v10 04/14] net/idpf: add queue release Junfeng Guo
2022-10-24 13:01                                 ` [PATCH v10 05/14] net/idpf: add support for packet type get Junfeng Guo
2022-10-24 13:01                                 ` [PATCH v10 06/14] net/idpf: add support for MTU configuration Junfeng Guo
2022-10-24 13:01                                 ` [PATCH v10 07/14] net/idpf: add support for basic Rx datapath Junfeng Guo
2022-10-24 13:01                                 ` [PATCH v10 08/14] net/idpf: add support for basic Tx datapath Junfeng Guo
2022-10-24 13:01                                 ` [PATCH v10 09/14] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-10-24 13:01                                 ` [PATCH v10 10/14] net/idpf: add support for RSS Junfeng Guo
2022-10-24 13:01                                 ` [PATCH v10 11/14] net/idpf: add support for Rx offloading Junfeng Guo
2022-10-24 13:01                                 ` [PATCH v10 12/14] net/idpf: add support for Tx offloading Junfeng Guo
2022-10-24 13:01                                 ` [PATCH v10 13/14] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-24 13:01                                 ` [PATCH v10 14/14] net/idpf: add support for timestamp offload Junfeng Guo
2022-10-21  5:18                             ` [PATCH v9 02/14] net/idpf: add support for device initialization Junfeng Guo
2022-10-21  7:39                               ` Andrew Rybchenko
2022-10-21  7:48                                 ` Andrew Rybchenko
2022-10-21 12:41                                   ` Zhang, Qi Z
2022-10-25  7:52                                     ` Andrew Rybchenko
2022-10-21  5:18                             ` [PATCH v9 03/14] net/idpf: add queue setup and release in single queue model Junfeng Guo
2022-10-21  7:44                               ` Andrew Rybchenko
2022-10-21  5:18                             ` [PATCH v9 04/14] net/idpf: add queue setup and release in split " Junfeng Guo
2022-10-21  5:18                             ` [PATCH v9 05/14] net/idpf: add support for queue start and stop Junfeng Guo
2022-10-21  7:53                               ` Andrew Rybchenko
2022-10-21  5:18                             ` [PATCH v9 06/14] net/idpf: add support for device information get Junfeng Guo
2022-10-21  7:56                               ` Andrew Rybchenko
2022-10-21  5:18                             ` [PATCH v9 07/14] net/idpf: add support for packet type get Junfeng Guo
2022-10-21  8:00                               ` Andrew Rybchenko
2022-10-21  5:18                             ` [PATCH v9 08/14] net/idpf: add support for basic Rx/Tx datapath Junfeng Guo
2022-10-21  5:18                             ` [PATCH v9 09/14] net/idpf: add support for Rx/Tx offloading Junfeng Guo
2022-10-21  8:29                               ` Andrew Rybchenko
2022-10-24 13:26                                 ` Xing, Beilei
2022-10-21  5:18                             ` [PATCH v9 10/14] net/idpf: add support for RSS Junfeng Guo
2022-10-21  8:38                               ` Andrew Rybchenko
2022-10-21  5:18                             ` [PATCH v9 11/14] net/idpf: add support for MTU configuration Junfeng Guo
2022-10-21  5:18                             ` [PATCH v9 12/14] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-10-21  5:18                             ` [PATCH v9 13/14] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-21  5:18                             ` [PATCH v9 14/14] net/idpf: add support for timestamp offload Junfeng Guo
2022-10-20  6:29                         ` [PATCH v8 02/14] net/idpf: add support for device initialization Junfeng Guo
2022-10-20  6:29                         ` [PATCH v8 03/14] net/idpf: add queue setup and release in single queue model Junfeng Guo
2022-10-20  6:29                         ` [PATCH v8 04/14] net/idpf: add queue setup and release in split " Junfeng Guo
2022-10-20  6:29                         ` [PATCH v8 05/14] net/idpf: add support for queue start and stop Junfeng Guo
2022-10-20  6:29                         ` [PATCH v8 06/14] net/idpf: add support for device information get Junfeng Guo
2022-10-20  6:29                         ` [PATCH v8 07/14] net/idpf: add support for packet type get Junfeng Guo
2022-10-20  6:29                         ` [PATCH v8 08/14] net/idpf: add support for basic Rx/Tx datapath Junfeng Guo
2022-10-20  6:29                         ` [PATCH v8 09/14] net/idpf: add support for Rx/Tx offloading Junfeng Guo
2022-10-20  6:29                         ` [PATCH v8 10/14] net/idpf: add support for RSS Junfeng Guo
2022-10-20  6:29                         ` [PATCH v8 11/14] net/idpf: add support for MTU configuration Junfeng Guo
2022-10-20  6:29                         ` [PATCH v8 12/14] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-10-20  6:29                         ` [PATCH v8 13/14] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-20  6:29                         ` [PATCH v8 14/14] net/idpf: add support for timestamp offload Junfeng Guo
2022-10-19 14:54                 ` [PATCH v6 02/14] net/idpf: add support for device initialization Junfeng Guo
2022-10-19 14:54                 ` [PATCH v6 03/14] net/idpf: add queue setup and release in single queue model Junfeng Guo
2022-10-19 14:54                 ` [PATCH v6 04/14] net/idpf: add queue setup and release in split " Junfeng Guo
2022-10-19 14:54                 ` [PATCH v6 05/14] net/idpf: add support for queue start and stop Junfeng Guo
2022-10-19 14:54                 ` [PATCH v6 06/14] net/idpf: add support for device information get Junfeng Guo
2022-10-19 14:54                 ` [PATCH v6 07/14] net/idpf: add support for packet type get Junfeng Guo
2022-10-19 14:54                 ` [PATCH v6 08/14] net/idpf: add support for basic Rx/Tx datapath Junfeng Guo
2022-10-19 14:54                 ` [PATCH v6 09/14] net/idpf: add support for Rx/Tx offloading Junfeng Guo
2022-10-19 14:54                 ` [PATCH v6 10/14] net/idpf: add support for RSS Junfeng Guo
2022-10-19 14:54                 ` [PATCH v6 11/14] net/idpf: add support for MTU configuration Junfeng Guo
2022-10-19 14:54                 ` [PATCH v6 12/14] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-10-19 14:54                 ` [PATCH v6 13/14] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-19 14:54                 ` [PATCH v6 14/14] net/idpf: add support for timestamp offload Junfeng Guo
2022-10-19 11:03             ` [PATCH v5 02/14] net/idpf: add support for device initialization Junfeng Guo
2022-10-19 11:03             ` [PATCH v5 03/14] net/idpf: add queue setup and release in single queue model Junfeng Guo
2022-10-19 11:03             ` [PATCH v5 04/14] net/idpf: add queue setup and release in split " Junfeng Guo
2022-10-19 11:03             ` [PATCH v5 05/14] net/idpf: add support for queue start and stop Junfeng Guo
2022-10-19 11:03             ` [PATCH v5 06/14] net/idpf: add support for device information get Junfeng Guo
2022-10-19 11:03             ` [PATCH v5 07/14] net/idpf: add support for packet type get Junfeng Guo
2022-10-19 11:03             ` [PATCH v5 08/14] net/idpf: add support for basic Rx/Tx datapath Junfeng Guo
2022-10-19 11:03             ` [PATCH v5 09/14] net/idpf: add support for Rx/Tx offloading Junfeng Guo
2022-10-19 11:03             ` [PATCH v5 10/14] net/idpf: add support for RSS Junfeng Guo
2022-10-19 11:03             ` [PATCH v5 11/14] net/idpf: add support for MTU configuration Junfeng Guo
2022-10-19 11:03             ` [PATCH v5 12/14] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-10-19 11:03             ` [PATCH v5 13/14] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-19 11:03             ` [PATCH v5 14/14] net/idpf: add support for timestamp offload Junfeng Guo
2022-10-19 10:37         ` [PATCH v4 02/14] net/idpf: add support for device initialization Junfeng Guo
2022-10-19 10:37         ` [PATCH v4 03/14] net/idpf: add queue setup and release in single queue model Junfeng Guo
2022-10-19 10:37         ` [PATCH v4 04/14] net/idpf: add queue setup and release in split " Junfeng Guo
2022-10-19 10:37         ` [PATCH v4 05/14] net/idpf: add support for queue start and stop Junfeng Guo
2022-10-19 10:37         ` [PATCH v4 06/14] net/idpf: add support for device information get Junfeng Guo
2022-10-19 10:37         ` [PATCH v4 07/14] net/idpf: add support for packet type get Junfeng Guo
2022-10-19 10:37         ` [PATCH v4 08/14] net/idpf: add support for basic Rx/Tx datapath Junfeng Guo
2022-10-19 10:37         ` [PATCH v4 09/14] net/idpf: add support for Rx/Tx offloading Junfeng Guo
2022-10-19 10:37         ` [PATCH v4 10/14] net/idpf: add support for RSS Junfeng Guo
2022-10-19 10:37         ` [PATCH v4 11/14] net/idpf: add support for MTU configuration Junfeng Guo
2022-10-19 10:37         ` [PATCH v4 12/14] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-10-19 10:37         ` [PATCH v4 13/14] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-19 10:37         ` [PATCH v4 14/14] net/idpf: add support for timestamp offload Junfeng Guo
2022-10-18 11:12     ` [PATCH v3 02/15] net/idpf: add support for device initialization Junfeng Guo
2022-10-18 11:12     ` [PATCH v3 03/15] net/idpf: add queue setup and release in single queue model Junfeng Guo
2022-10-18 11:12     ` [PATCH v3 04/15] net/idpf: add queue setup and release in split " Junfeng Guo
2022-10-18 11:12     ` [PATCH v3 05/15] net/idpf: add support for queue start and stop Junfeng Guo
2022-10-18 11:12     ` [PATCH v3 06/15] net/idpf: add support for device information get Junfeng Guo
2022-10-18 11:12     ` [PATCH v3 07/15] net/idpf: add support for packet type get Junfeng Guo
2022-10-18 11:12     ` [PATCH v3 08/15] net/idpf: add support for link status update Junfeng Guo
2022-10-18 11:12     ` [PATCH v3 09/15] net/idpf: add support for basic Rx/Tx datapath Junfeng Guo
2022-10-18 11:12     ` [PATCH v3 10/15] net/idpf: add support for Rx/Tx offloading Junfeng Guo
2022-10-18 11:12     ` [PATCH v3 11/15] net/idpf: add support for RSS Junfeng Guo
2022-10-18 11:12     ` [PATCH v3 12/15] net/idpf: add support for MTU configuration Junfeng Guo
2022-10-18 11:12     ` [PATCH v3 13/15] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-10-18 11:12     ` [PATCH v3 14/15] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-18 11:12     ` [PATCH v3 15/15] net/idpf: add support for timestamp offload Junfeng Guo
