* [dpdk-dev] [PATCH v1 2/5] net/iavf: support Ethernet CRC strip disable
2020-12-14 7:07 [dpdk-dev] [PATCH v1 1/5] common/iavf: new VLAN opcode Haiyue Wang
@ 2020-12-14 7:07 ` Haiyue Wang
2020-12-14 7:07 ` [dpdk-dev] [PATCH v1 3/5] net/ice: enable QinQ filter for switch Haiyue Wang
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Haiyue Wang @ 2020-12-14 7:07 UTC (permalink / raw)
To: dev; +Cc: qiming.yang, jingjing.wu, qi.z.zhang, Haiyue Wang, Beilei Xing
The VF will check the PF's CRC strip capability firstly, then set the
'CRC strip disable' value in the queue configuration according to the
RX CRC offload setting.
Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
---
drivers/net/iavf/iavf_ethdev.c | 3 +++
drivers/net/iavf/iavf_rxtx.c | 6 +++++-
drivers/net/iavf/iavf_vchnl.c | 3 ++-
3 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 7e3c26a94..0fd06e4b4 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -795,6 +795,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_KEEP_CRC;
+
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 21d508b3f..d53d7b984 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -550,11 +550,15 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->rx_free_thresh = rx_free_thresh;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = 0; /* crc stripping by default */
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
rxq->rx_hdr_len = 0;
rxq->vsi = vsi;
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+
len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 33d03af65..3f949c9e3 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -458,6 +458,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_FDIR_PF |
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
+ VIRTCHNL_VF_OFFLOAD_CRC |
VIRTCHNL_VF_LARGE_NUM_QPAIRS;
args.in_args = (uint8_t *)&caps;
@@ -851,7 +852,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
-
+ vc_qp->rxq.crc_disable = rxq[i]->crc_len != 0 ? 1 : 0;
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
if (vf->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
--
2.29.2
^ permalink raw reply [flat|nested] 5+ messages in thread
* [dpdk-dev] [PATCH v1 3/5] net/ice: enable QinQ filter for switch
2020-12-14 7:07 [dpdk-dev] [PATCH v1 1/5] common/iavf: new VLAN opcode Haiyue Wang
2020-12-14 7:07 ` [dpdk-dev] [PATCH v1 2/5] net/iavf: support Ethernet CRC strip disable Haiyue Wang
@ 2020-12-14 7:07 ` Haiyue Wang
2020-12-14 7:07 ` [dpdk-dev] [PATCH v1 4/5] net/ice: add DCF port representor Haiyue Wang
2020-12-14 7:07 ` [dpdk-dev] [PATCH v1 5/5] net/iavf: support new VLAN virtchnl opcodes Haiyue Wang
3 siblings, 0 replies; 5+ messages in thread
From: Haiyue Wang @ 2020-12-14 7:07 UTC (permalink / raw)
To: dev; +Cc: qiming.yang, jingjing.wu, qi.z.zhang, Haiyue Wang, Wei Zhao
Enable the double VLAN support for QinQ filter switch.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
---
drivers/net/ice/ice_generic_flow.c | 8 +++
drivers/net/ice/ice_generic_flow.h | 1 +
drivers/net/ice/ice_switch_filter.c | 104 +++++++++++++++++++++++++---
3 files changed, 102 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 1429cbc3b..1712d3b2e 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1455,6 +1455,14 @@ enum rte_flow_item_type pattern_eth_qinq_pppoes[] = {
RTE_FLOW_ITEM_TYPE_PPPOES,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_qinq_pppoes_proto[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_PPPOES,
+ RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+ RTE_FLOW_ITEM_TYPE_END,
+};
enum rte_flow_item_type pattern_eth_pppoes_ipv4[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_PPPOES,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 434d2f425..dc45d8dc6 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -426,6 +426,7 @@ extern enum rte_flow_item_type pattern_eth_pppoes_proto[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[];
extern enum rte_flow_item_type pattern_eth_qinq_pppoes[];
+extern enum rte_flow_item_type pattern_eth_qinq_pppoes_proto[];
extern enum rte_flow_item_type pattern_eth_pppoes_ipv4[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4[];
extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv4[];
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 8cba6eb7b..43c755e30 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -35,11 +35,15 @@
#define ICE_SW_INSET_ETHER ( \
ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
#define ICE_SW_INSET_MAC_VLAN ( \
- ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
- ICE_INSET_VLAN_OUTER)
+ ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
+ ICE_INSET_VLAN_INNER)
+#define ICE_SW_INSET_MAC_QINQ ( \
+ ICE_SW_INSET_MAC_VLAN | ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
+#define ICE_SW_INSET_MAC_QINQ_IPV4 ( \
+ ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4)
#define ICE_SW_INSET_MAC_IPV4_TCP ( \
ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \
@@ -52,6 +56,8 @@
ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \
ICE_INSET_IPV6_NEXT_HDR)
+#define ICE_SW_INSET_MAC_QINQ_IPV6 ( \
+ ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6)
#define ICE_SW_INSET_MAC_IPV6_TCP ( \
ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \
@@ -182,6 +188,8 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_SW_INSET_ETHER, ICE_INSET_NONE},
{pattern_ethertype_vlan,
ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
+ {pattern_ethertype_qinq,
+ ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE},
{pattern_eth_arp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4,
@@ -262,6 +270,18 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_pfcp,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_qinq_ipv4,
+ ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE},
+ {pattern_eth_qinq_ipv6,
+ ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE},
+ {pattern_eth_qinq_pppoes,
+ ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_qinq_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+ {pattern_eth_qinq_pppoes_ipv4,
+ ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
+ {pattern_eth_qinq_pppoes_ipv6,
+ ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
};
static struct
@@ -304,6 +324,8 @@ ice_pattern_match_item ice_switch_pattern_perm_comms[] = {
ICE_SW_INSET_ETHER, ICE_INSET_NONE},
{pattern_ethertype_vlan,
ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
+ {pattern_ethertype_qinq,
+ ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE},
{pattern_eth_arp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4,
@@ -384,6 +406,18 @@ ice_pattern_match_item ice_switch_pattern_perm_comms[] = {
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_pfcp,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_qinq_ipv4,
+ ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE},
+ {pattern_eth_qinq_ipv6,
+ ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE},
+ {pattern_eth_qinq_pppoes,
+ ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_qinq_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+ {pattern_eth_qinq_pppoes_ipv4,
+ ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
+ {pattern_eth_qinq_pppoes_ipv6,
+ ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
};
static int
@@ -516,6 +550,8 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
bool pppoe_elem_valid = 0;
bool pppoe_patt_valid = 0;
bool pppoe_prot_valid = 0;
+ bool inner_vlan_valid = 0;
+ bool outer_vlan_valid = 0;
bool tunnel_valid = 0;
bool profile_rule = 0;
bool nvgre_valid = 0;
@@ -1062,23 +1098,40 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
"Invalid VLAN item");
return 0;
}
+
+ if (!outer_vlan_valid &&
+ (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
+ *tun_type == ICE_NON_TUN_QINQ))
+ outer_vlan_valid = 1;
+ else if (!inner_vlan_valid &&
+ (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
+ *tun_type == ICE_NON_TUN_QINQ))
+ inner_vlan_valid = 1;
+ else if (!inner_vlan_valid)
+ inner_vlan_valid = 1;
+
if (vlan_spec && vlan_mask) {
- list[t].type = ICE_VLAN_OFOS;
+ if (outer_vlan_valid && !inner_vlan_valid) {
+ list[t].type = ICE_VLAN_EX;
+ input_set |= ICE_INSET_VLAN_OUTER;
+ } else if (inner_vlan_valid) {
+ list[t].type = ICE_VLAN_OFOS;
+ input_set |= ICE_INSET_VLAN_INNER;
+ }
+
if (vlan_mask->tci) {
list[t].h_u.vlan_hdr.vlan =
vlan_spec->tci;
list[t].m_u.vlan_hdr.vlan =
vlan_mask->tci;
- input_set |= ICE_INSET_VLAN_OUTER;
input_set_byte += 2;
}
if (vlan_mask->inner_type) {
- list[t].h_u.vlan_hdr.type =
- vlan_spec->inner_type;
- list[t].m_u.vlan_hdr.type =
- vlan_mask->inner_type;
- input_set |= ICE_INSET_ETHERTYPE;
- input_set_byte += 2;
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VLAN input set.");
+ return 0;
}
t++;
}
@@ -1380,8 +1433,27 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
}
+ if (*tun_type == ICE_SW_TUN_PPPOE_PAY &&
+ inner_vlan_valid && outer_vlan_valid)
+ *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ;
+ else if (*tun_type == ICE_SW_TUN_PPPOE &&
+ inner_vlan_valid && outer_vlan_valid)
+ *tun_type = ICE_SW_TUN_PPPOE_QINQ;
+ else if (*tun_type == ICE_NON_TUN &&
+ inner_vlan_valid && outer_vlan_valid)
+ *tun_type = ICE_NON_TUN_QINQ;
+ else if (*tun_type == ICE_SW_TUN_AND_NON_TUN &&
+ inner_vlan_valid && outer_vlan_valid)
+ *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
+
if (pppoe_patt_valid && !pppoe_prot_valid) {
- if (ipv6_valid && udp_valid)
+ if (inner_vlan_valid && outer_vlan_valid && ipv4_valid)
+ *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ;
+ else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid)
+ *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ;
+ else if (inner_vlan_valid && outer_vlan_valid)
+ *tun_type = ICE_SW_TUN_PPPOE_QINQ;
+ else if (ipv6_valid && udp_valid)
*tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
else if (ipv6_valid && tcp_valid)
*tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
@@ -1659,6 +1731,7 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
uint16_t lkups_num = 0;
const struct rte_flow_item *item = pattern;
uint16_t item_num = 0;
+ uint16_t vlan_num = 0;
enum ice_sw_tunnel_type tun_type =
ICE_NON_TUN;
struct ice_pattern_match_item *pattern_match_item = NULL;
@@ -1674,6 +1747,10 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
if (eth_mask->type == UINT16_MAX)
tun_type = ICE_SW_TUN_AND_NON_TUN;
}
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
+ vlan_num++;
+
/* reserve one more memory slot for ETH which may
* consume 2 lookup items.
*/
@@ -1681,6 +1758,11 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
item_num++;
}
+ if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
+ tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
+ else if (vlan_num == 2)
+ tun_type = ICE_NON_TUN_QINQ;
+
list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
if (!list) {
rte_flow_error_set(error, EINVAL,
--
2.29.2
^ permalink raw reply [flat|nested] 5+ messages in thread
* [dpdk-dev] [PATCH v1 4/5] net/ice: add DCF port representor
2020-12-14 7:07 [dpdk-dev] [PATCH v1 1/5] common/iavf: new VLAN opcode Haiyue Wang
2020-12-14 7:07 ` [dpdk-dev] [PATCH v1 2/5] net/iavf: support Ethernet CRC strip disable Haiyue Wang
2020-12-14 7:07 ` [dpdk-dev] [PATCH v1 3/5] net/ice: enable QinQ filter for switch Haiyue Wang
@ 2020-12-14 7:07 ` Haiyue Wang
2020-12-14 7:07 ` [dpdk-dev] [PATCH v1 5/5] net/iavf: support new VLAN virtchnl opcodes Haiyue Wang
3 siblings, 0 replies; 5+ messages in thread
From: Haiyue Wang @ 2020-12-14 7:07 UTC (permalink / raw)
To: dev; +Cc: qiming.yang, jingjing.wu, qi.z.zhang, Haiyue Wang
Add the DCF port representor infrastructure for the VFs of DCF attached
PF. Then the standard ethdev API can be used to configure the VFs.
The main function is VLAN related.
Signed-off-by: Qiming Yang <qiming.yang@intel.com>
Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
---
drivers/net/ice/ice_dcf.c | 1 +
drivers/net/ice/ice_dcf_ethdev.c | 91 +++++-
drivers/net/ice/ice_dcf_ethdev.h | 20 ++
drivers/net/ice/ice_dcf_vf_representor.c | 356 +++++++++++++++++++++++
drivers/net/ice/meson.build | 1 +
5 files changed, 462 insertions(+), 7 deletions(-)
create mode 100644 drivers/net/ice/ice_dcf_vf_representor.c
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 44dbd3bb8..4a9af3292 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -234,6 +234,7 @@ ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
+ VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index b0b2ecb0d..a9e78064d 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -970,20 +970,97 @@ ice_dcf_cap_selected(struct rte_devargs *devargs)
return ret;
}
-static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
- struct rte_pci_device *pci_dev)
+static int
+eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
{
+ struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
+ struct ice_dcf_vf_repr_param repr_param;
+ char repr_name[RTE_ETH_NAME_MAX_LEN];
+ struct ice_dcf_adapter *dcf_adapter;
+ struct rte_eth_dev *dcf_ethdev;
+ uint16_t dcf_vsi_id;
+ int i, ret;
+
if (!ice_dcf_cap_selected(pci_dev->device.devargs))
return 1;
- return rte_eth_dev_pci_generic_probe(pci_dev,
- sizeof(struct ice_dcf_adapter),
- ice_dcf_dev_init);
+ ret = rte_eth_devargs_parse(pci_dev->device.devargs->args, &eth_da);
+ if (ret)
+ return ret;
+
+ ret = rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct ice_dcf_adapter),
+ ice_dcf_dev_init);
+ if (ret || !eth_da.nb_representor_ports)
+ return ret;
+
+ dcf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (dcf_ethdev == NULL)
+ return -ENODEV;
+
+ dcf_adapter = dcf_ethdev->data->dev_private;
+
+ if (eth_da.nb_representor_ports > dcf_adapter->real_hw.num_vfs ||
+ eth_da.nb_representor_ports >= RTE_MAX_ETHPORTS) {
+ PMD_DRV_LOG(ERR, "the number of port representors is too large: %u",
+ eth_da.nb_representor_ports);
+ return -EINVAL;
+ }
+
+ dcf_vsi_id = dcf_adapter->real_hw.vsi_id | VIRTCHNL_DCF_VF_VSI_VALID;
+
+ repr_param.adapter = dcf_adapter;
+ repr_param.switch_domain_id = 0;
+
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ uint16_t vf_id = eth_da.representor_ports[i];
+
+ if (vf_id >= dcf_adapter->real_hw.num_vfs) {
+ PMD_DRV_LOG(ERR, "VF ID %u is out of range (0 ~ %u)",
+ vf_id, dcf_adapter->real_hw.num_vfs - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (dcf_adapter->real_hw.vf_vsi_map[vf_id] == dcf_vsi_id) {
+ PMD_DRV_LOG(ERR, "VF ID %u is DCF's ID.\n", vf_id);
+ ret = -EINVAL;
+ break;
+ }
+
+ repr_param.vf_id = vf_id;
+ snprintf(repr_name, sizeof(repr_name), "net_%s_representor_%u",
+ pci_dev->device.name, vf_id);
+ ret = rte_eth_dev_create(&pci_dev->device, repr_name,
+ sizeof(struct ice_dcf_vf_repr),
+ NULL, NULL, ice_dcf_vf_repr_init,
+ &repr_param);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to create DCF VF representor %s",
+ repr_name);
+ break;
+ }
+ }
+
+ return ret;
}
-static int eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
+static int
+eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, ice_dcf_dev_uninit);
+ struct rte_eth_dev *eth_dev;
+
+ eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!eth_dev)
+ return 0;
+
+ if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+ return rte_eth_dev_pci_generic_remove(pci_dev,
+ ice_dcf_vf_repr_uninit);
+ else
+ return rte_eth_dev_pci_generic_remove(pci_dev,
+ ice_dcf_dev_uninit);
}
static const struct rte_pci_id pci_id_ice_dcf_map[] = {
diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h
index b54528bea..bd5552332 100644
--- a/drivers/net/ice/ice_dcf_ethdev.h
+++ b/drivers/net/ice/ice_dcf_ethdev.h
@@ -22,9 +22,29 @@ struct ice_dcf_adapter {
struct ice_dcf_hw real_hw;
};
+struct ice_dcf_vf_repr_param {
+ struct ice_dcf_adapter *adapter;
+ uint16_t switch_domain_id;
+ uint16_t vf_id;
+};
+
+struct ice_dcf_vf_repr {
+ struct ice_dcf_adapter *dcf_adapter;
+ struct rte_ether_addr mac_addr;
+ uint16_t switch_domain_id;
+ uint16_t vf_id;
+
+ uint16_t outer_vlan_tpid;
+ uint16_t pvid;
+ uint16_t hw_vlan_insert_pvid:1;
+};
+
void ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
uint8_t *msg, uint16_t msglen);
int ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev);
void ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev);
+int ice_dcf_vf_repr_init(struct rte_eth_dev *ethdev, void *init_param);
+int ice_dcf_vf_repr_uninit(struct rte_eth_dev *ethdev);
+
#endif /* _ICE_DCF_ETHDEV_H_ */
diff --git a/drivers/net/ice/ice_dcf_vf_representor.c b/drivers/net/ice/ice_dcf_vf_representor.c
new file mode 100644
index 000000000..e9806895d
--- /dev/null
+++ b/drivers/net/ice/ice_dcf_vf_representor.c
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <errno.h>
+#include <sys/types.h>
+
+#include <rte_ethdev.h>
+
+#include "ice_dcf_ethdev.h"
+#include "ice_rxtx.h"
+
+static uint16_t
+ice_dcf_vf_repr_rx_burst(__rte_unused void *rxq,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static uint16_t
+ice_dcf_vf_repr_tx_burst(__rte_unused void *txq,
+ __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static int
+ice_dcf_vf_repr_dev_configure(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static int
+ice_dcf_vf_repr_dev_start(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = ETH_LINK_UP;
+
+ return 0;
+}
+
+static int
+ice_dcf_vf_repr_dev_stop(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+
+ return 0;
+}
+
+static int
+ice_dcf_vf_repr_dev_close(struct rte_eth_dev *dev)
+{
+ return ice_dcf_vf_repr_uninit(dev);
+}
+
+static int
+ice_dcf_vf_repr_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue_id,
+ __rte_unused uint16_t nb_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *conf,
+ __rte_unused struct rte_mempool *pool)
+{
+ return 0;
+}
+
+static int
+ice_dcf_vf_repr_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue_id,
+ __rte_unused uint16_t nb_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *conf)
+{
+ return 0;
+}
+
+static int
+ice_dcf_vf_repr_promiscuous_enable(__rte_unused struct rte_eth_dev *ethdev)
+{
+ return 0;
+}
+
+static int
+ice_dcf_vf_repr_promiscuous_disable(__rte_unused struct rte_eth_dev *ethdev)
+{
+ return 0;
+}
+
+static int
+ice_dcf_vf_repr_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static int
+ice_dcf_vf_repr_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static int
+ice_dcf_vf_repr_link_update(__rte_unused struct rte_eth_dev *ethdev,
+ __rte_unused int wait_to_complete)
+{
+ return 0;
+}
+
+static int
+ice_dcf_vf_repr_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ice_dcf_vf_repr *repr = dev->data->dev_private;
+ struct ice_dcf_hw *dcf_hw =
+ &repr->dcf_adapter->real_hw;
+
+ dev_info->device = dev->device;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_queues = dcf_hw->vsi_res->num_queue_pairs;
+ dev_info->max_tx_queues = dcf_hw->vsi_res->num_queue_pairs;
+ dev_info->min_rx_bufsize = ICE_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = ICE_FRAME_SIZE_MAX;
+ dev_info->hash_key_size = dcf_hw->vf_res->rss_key_size;
+ dev_info->reta_size = dcf_hw->vf_res->rss_lut_size;
+ dev_info->flow_type_rss_offloads = ICE_RSS_OFFLOAD_ALL;
+
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_RSS_HASH;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = ICE_DEFAULT_RX_PTHRESH,
+ .hthresh = ICE_DEFAULT_RX_HTHRESH,
+ .wthresh = ICE_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = ICE_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = ICE_DEFAULT_TX_PTHRESH,
+ .hthresh = ICE_DEFAULT_TX_HTHRESH,
+ .wthresh = ICE_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = ICE_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = ICE_DEFAULT_TX_RSBIT_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ICE_MAX_RING_DESC,
+ .nb_min = ICE_MIN_RING_DESC,
+ .nb_align = ICE_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ICE_MAX_RING_DESC,
+ .nb_min = ICE_MIN_RING_DESC,
+ .nb_align = ICE_ALIGN_RING_DESC,
+ };
+
+ dev_info->switch_info.name = dcf_hw->eth_dev->device->name;
+ dev_info->switch_info.domain_id = repr->switch_domain_id;
+ dev_info->switch_info.port_id = repr->vf_id;
+
+ return 0;
+}
+
+static int
+ice_dcf_vlan_offload_config(struct ice_dcf_vf_repr *repr,
+ struct virtchnl_dcf_vlan_offload *vlan_offload)
+{
+ struct dcf_virtchnl_cmd args;
+
+ memset(&args, 0, sizeof(args));
+ args.v_op = VIRTCHNL_OP_DCF_VLAN_OFFLOAD;
+ args.req_msg = (uint8_t *)vlan_offload;
+ args.req_msglen = sizeof(*vlan_offload);
+
+ return ice_dcf_execute_virtchnl_cmd(&repr->dcf_adapter->real_hw, &args);
+}
+
+static __rte_always_inline bool
+ice_dcf_vlan_offload_ena(struct ice_dcf_vf_repr *repr)
+{
+ return !!(repr->dcf_adapter->real_hw.vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_VLAN_V2);
+}
+
+static int
+ice_dcf_vf_repr_vlan_pvid_set(struct rte_eth_dev *dev,
+ uint16_t pvid, int on)
+{
+ struct ice_dcf_vf_repr *repr = dev->data->dev_private;
+ struct virtchnl_dcf_vlan_offload vlan_offload;
+ int err;
+
+ if (!ice_dcf_vlan_offload_ena(repr))
+ return -ENOTSUP;
+
+ memset(&vlan_offload, 0, sizeof(vlan_offload));
+
+ vlan_offload.vf_id = repr->vf_id;
+ vlan_offload.tpid = repr->outer_vlan_tpid;
+ vlan_offload.vlan_flags = (VIRTCHNL_DCF_VLAN_TYPE_OUTER <<
+ VIRTCHNL_DCF_VLAN_TYPE_S) |
+ (VIRTCHNL_DCF_VLAN_INSERT_PORT_BASED <<
+ VIRTCHNL_DCF_VLAN_INSERT_MODE_S);
+ vlan_offload.vlan_id = on ? pvid : 0;
+
+ err = ice_dcf_vlan_offload_config(repr, &vlan_offload);
+ if (!err) {
+ repr->pvid = vlan_offload.vlan_id;
+ repr->hw_vlan_insert_pvid = on ? 1 : 0;
+ }
+
+ return err;
+}
+
+static int
+ice_dcf_vf_repr_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct ice_dcf_vf_repr *repr = dev->data->dev_private;
+ struct rte_eth_rxmode *rxmode;
+
+ if (!ice_dcf_vlan_offload_ena(repr))
+ return -ENOTSUP;
+
+ rxmode = &dev->data->dev_conf.rxmode;
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND))
+ ice_dcf_vf_repr_vlan_pvid_set(dev, 0, 0);
+ }
+
+ return 0;
+}
+
+static int
+ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type, uint16_t tpid)
+{
+ struct ice_dcf_vf_repr *repr = dev->data->dev_private;
+
+ if (!ice_dcf_vlan_offload_ena(repr))
+ return -ENOTSUP;
+
+ if (vlan_type != ETH_VLAN_TYPE_INNER &&
+ vlan_type != ETH_VLAN_TYPE_OUTER) {
+ PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
+ return -EINVAL;
+ }
+
+ if (vlan_type == ETH_VLAN_TYPE_INNER) {
+ PMD_DRV_LOG(ERR,
+ "Can accelerate only outer VLAN in QinQ\n");
+ return -EINVAL;
+ }
+
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND)) {
+ PMD_DRV_LOG(ERR,
+ "QinQ not enabled.");
+ return -EINVAL;
+ }
+
+ if (tpid != RTE_ETHER_TYPE_QINQ ||
+ tpid != RTE_ETHER_TYPE_VLAN ||
+ tpid != RTE_ETHER_TYPE_QINQ1) {
+ PMD_DRV_LOG(ERR,
+ "Invalid TPID: 0x%04x\n", tpid);
+ return -EINVAL;
+ }
+
+ repr->outer_vlan_tpid = tpid;
+
+ return ice_dcf_vf_repr_vlan_pvid_set(dev,
+ repr->pvid,
+ repr->hw_vlan_insert_pvid);
+}
+
+static const struct eth_dev_ops ice_dcf_vf_repr_dev_ops = {
+ .dev_configure = ice_dcf_vf_repr_dev_configure,
+ .dev_start = ice_dcf_vf_repr_dev_start,
+ .dev_stop = ice_dcf_vf_repr_dev_stop,
+ .dev_close = ice_dcf_vf_repr_dev_close,
+ .dev_infos_get = ice_dcf_vf_repr_dev_info_get,
+ .rx_queue_setup = ice_dcf_vf_repr_rx_queue_setup,
+ .tx_queue_setup = ice_dcf_vf_repr_tx_queue_setup,
+ .promiscuous_enable = ice_dcf_vf_repr_promiscuous_enable,
+ .promiscuous_disable = ice_dcf_vf_repr_promiscuous_disable,
+ .allmulticast_enable = ice_dcf_vf_repr_allmulticast_enable,
+ .allmulticast_disable = ice_dcf_vf_repr_allmulticast_disable,
+ .link_update = ice_dcf_vf_repr_link_update,
+ .vlan_offload_set = ice_dcf_vf_repr_vlan_offload_set,
+ .vlan_pvid_set = ice_dcf_vf_repr_vlan_pvid_set,
+ .vlan_tpid_set = ice_dcf_vf_repr_vlan_tpid_set,
+};
+
+int
+ice_dcf_vf_repr_init(struct rte_eth_dev *ethdev, void *init_param)
+{
+ struct ice_dcf_vf_repr *repr = ethdev->data->dev_private;
+ struct ice_dcf_vf_repr_param *param = init_param;
+
+ repr->dcf_adapter = param->adapter;
+ repr->switch_domain_id = param->switch_domain_id;
+ repr->vf_id = param->vf_id;
+ repr->outer_vlan_tpid = RTE_ETHER_TYPE_VLAN;
+
+ ethdev->dev_ops = &ice_dcf_vf_repr_dev_ops;
+
+ ethdev->rx_pkt_burst = ice_dcf_vf_repr_rx_burst;
+ ethdev->tx_pkt_burst = ice_dcf_vf_repr_tx_burst;
+
+ ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ ethdev->data->representor_id = repr->vf_id;
+
+ ethdev->data->mac_addrs = &repr->mac_addr;
+
+ rte_eth_random_addr(repr->mac_addr.addr_bytes);
+
+ return 0;
+}
+
+int
+ice_dcf_vf_repr_uninit(struct rte_eth_dev *ethdev)
+{
+ ethdev->data->mac_addrs = NULL;
+
+ return 0;
+}
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 7b291269d..d58936089 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -61,6 +61,7 @@ if arch_subdir == 'x86'
endif
sources += files('ice_dcf.c',
+ 'ice_dcf_vf_representor.c',
'ice_dcf_ethdev.c',
'ice_dcf_parent.c')
--
2.29.2
^ permalink raw reply [flat|nested] 5+ messages in thread
* [dpdk-dev] [PATCH v1 5/5] net/iavf: support new VLAN virtchnl opcodes
2020-12-14 7:07 [dpdk-dev] [PATCH v1 1/5] common/iavf: new VLAN opcode Haiyue Wang
` (2 preceding siblings ...)
2020-12-14 7:07 ` [dpdk-dev] [PATCH v1 4/5] net/ice: add DCF port representor Haiyue Wang
@ 2020-12-14 7:07 ` Haiyue Wang
3 siblings, 0 replies; 5+ messages in thread
From: Haiyue Wang @ 2020-12-14 7:07 UTC (permalink / raw)
To: dev; +Cc: qiming.yang, jingjing.wu, qi.z.zhang, Haiyue Wang, Beilei Xing
The new VLAN virtchnl opcodes introduce rich capabilities setting like
outer/inner different TPIDs.
Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
---
drivers/net/iavf/iavf.h | 6 ++
drivers/net/iavf/iavf_ethdev.c | 46 +++++++++++
drivers/net/iavf/iavf_vchnl.c | 147 +++++++++++++++++++++++++++++++++
3 files changed, 199 insertions(+)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 6d5912d8c..c57ad6175 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -132,6 +132,7 @@ struct iavf_info {
struct virtchnl_version_info virtchnl_version;
struct virtchnl_vf_resource *vf_res; /* VF resource */
struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
+ struct virtchnl_vlan_caps vlan_v2_caps;
uint64_t supported_rxdid;
uint8_t *proto_xtr; /* proto xtr type for all queues */
volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
@@ -288,7 +289,9 @@ int iavf_check_api_version(struct iavf_adapter *adapter);
int iavf_get_vf_resource(struct iavf_adapter *adapter);
void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
+int iavf_enable_vlan_strip_v2(struct iavf_adapter *adapter);
int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
+int iavf_disable_vlan_strip_v2(struct iavf_adapter *adapter);
int iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid,
bool rx, bool on);
int iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid,
@@ -302,6 +305,7 @@ int iavf_configure_rss_key(struct iavf_adapter *adapter);
int iavf_configure_queues(struct iavf_adapter *adapter,
uint16_t num_queue_pairs, uint16_t index);
int iavf_get_supported_rxdid(struct iavf_adapter *adapter);
+int iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter);
int iavf_config_irq_map(struct iavf_adapter *adapter);
int iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
uint16_t index);
@@ -315,6 +319,8 @@ int iavf_config_promisc(struct iavf_adapter *adapter, bool enable_unicast,
int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
struct rte_ether_addr *addr, bool add);
int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
+int iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid,
+ bool add);
int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
int iavf_fdir_del(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
int iavf_fdir_check(struct iavf_adapter *adapter,
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 0fd06e4b4..4d946013c 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -384,6 +384,14 @@ iavf_dev_configure(struct rte_eth_dev *dev)
vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT;
}
+ /* Vlan stripping setting v2 */
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ iavf_enable_vlan_strip_v2(ad);
+ else
+ iavf_disable_vlan_strip_v2(ad);
+ }
+
/* Vlan stripping setting */
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
@@ -992,6 +1000,13 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
int err;
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+ err = iavf_add_del_vlan_v2(adapter, vlan_id, on);
+ if (err)
+ return -EIO;
+ return 0;
+ }
+
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
return -ENOTSUP;
@@ -1001,6 +1016,27 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
return 0;
}
+static int
+iavf_dev_vlan_offload_set_v2(struct rte_eth_dev *dev, int mask)
+{
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ int err;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ err = iavf_enable_vlan_strip_v2(adapter);
+ else
+ err = iavf_disable_vlan_strip_v2(adapter);
+
+ if (err)
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int
iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
@@ -1010,6 +1046,9 @@ iavf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
int err;
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
+ return iavf_dev_vlan_offload_set_v2(dev, mask);
+
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
return -ENOTSUP;
@@ -1861,6 +1900,13 @@ iavf_init_vf(struct rte_eth_dev *dev)
}
}
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+ if (iavf_get_vlan_offload_caps_v2(adapter) != 0) {
+ PMD_INIT_LOG(ERR, "failed to do get VLAN offload v2 capabilities");
+ goto err_rss;
+ }
+ }
+
iavf_init_proto_xtr(dev);
return 0;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 3f949c9e3..6b571924f 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -174,6 +174,7 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
case VIRTCHNL_OP_VERSION:
case VIRTCHNL_OP_GET_VF_RESOURCES:
case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
/* for init virtchnl ops, need to poll the response */
do {
result = iavf_read_msg_from_pf(adapter, args->out_size,
@@ -366,6 +367,45 @@ iavf_enable_vlan_strip(struct iavf_adapter *adapter)
return ret;
}
+int
+iavf_enable_vlan_strip_v2(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_vlan_offload_caps *offload_caps;
+ struct virtchnl_vlan_strip vlan_strip;
+ struct iavf_cmd_info args;
+ bool outer;
+ int ret;
+
+ offload_caps = &vf->vlan_v2_caps.offloads;
+ if (offload_caps->outer_stripping & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ outer = true;
+ else if (offload_caps->inner_stripping & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ outer = false;
+ else
+ return -ENOTSUP;
+
+ memset(&vlan_strip, 0, sizeof(vlan_strip));
+ vlan_strip.vsi_id = vf->vsi_res->vsi_id;
+ if (outer)
+ vlan_strip.outer_ethertype_setting =
+ VIRTCHNL_VLAN_ETHERTYPE_8100;
+ else
+ vlan_strip.inner_ethertype_setting =
+ VIRTCHNL_VLAN_ETHERTYPE_8100;
+
+ args.ops = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2;
+ args.in_args = (uint8_t *)&vlan_strip;
+ args.in_args_size = sizeof(vlan_strip);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+ ret = iavf_execute_vf_cmd(adapter, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "fail to execute command VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2");
+
+ return ret;
+}
+
int
iavf_disable_vlan_strip(struct iavf_adapter *adapter)
{
@@ -387,6 +427,45 @@ iavf_disable_vlan_strip(struct iavf_adapter *adapter)
return ret;
}
+int
+iavf_disable_vlan_strip_v2(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_vlan_offload_caps *offload_caps;
+ struct virtchnl_vlan_strip vlan_strip;
+ struct iavf_cmd_info args;
+ bool outer;
+ int ret;
+
+ offload_caps = &vf->vlan_v2_caps.offloads;
+ if (offload_caps->outer_stripping & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ outer = true;
+ else if (offload_caps->inner_stripping & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ outer = false;
+ else
+ return -ENOTSUP;
+
+ memset(&vlan_strip, 0, sizeof(vlan_strip));
+ vlan_strip.vsi_id = vf->vsi_res->vsi_id;
+ if (outer)
+ vlan_strip.outer_ethertype_setting =
+ VIRTCHNL_VLAN_ETHERTYPE_8100;
+ else
+ vlan_strip.inner_ethertype_setting =
+ VIRTCHNL_VLAN_ETHERTYPE_8100;
+
+ args.ops = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2;
+ args.in_args = (uint8_t *)&vlan_strip;
+ args.in_args_size = sizeof(vlan_strip);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+ ret = iavf_execute_vf_cmd(adapter, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "fail to execute command VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2");
+
+ return ret;
+}
+
#define VIRTCHNL_VERSION_MAJOR_START 1
#define VIRTCHNL_VERSION_MINOR_START 1
@@ -459,6 +538,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
VIRTCHNL_VF_OFFLOAD_CRC |
+ VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
VIRTCHNL_VF_LARGE_NUM_QPAIRS;
args.in_args = (uint8_t *)∩︀
@@ -522,6 +602,31 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter)
return 0;
}
+int
+iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct iavf_cmd_info args;
+ int ret;
+
+ args.ops = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;
+ args.in_args = NULL;
+ args.in_args_size = 0;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ ret = iavf_execute_vf_cmd(adapter, &args);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS");
+ return ret;
+ }
+
+ rte_memcpy(&vf->vlan_v2_caps, vf->aq_resp, sizeof(vf->vlan_v2_caps));
+
+ return 0;
+}
+
int
iavf_enable_queues(struct iavf_adapter *adapter)
{
@@ -1165,6 +1270,48 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
return err;
}
+int
+iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_vlan_filtering_caps *filtering_caps;
+ struct virtchnl_vlan_filter_list_v2 vlan_list;
+ struct iavf_cmd_info args;
+ bool outer;
+ int err;
+
+ filtering_caps = &vf->vlan_v2_caps.filtering;
+ if (filtering_caps->outer & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ outer = true;
+ else if (filtering_caps->inner & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ outer = false;
+ else
+ return -ENOTSUP;
+
+ memset(&vlan_list, 0, sizeof(vlan_list));
+ vlan_list.num_elements = 1;
+
+ if (outer) {
+ vlan_list.filters[0].outer.tci = vlanid;
+ vlan_list.filters[0].outer.tpid = RTE_ETHER_TYPE_VLAN;
+ } else {
+ vlan_list.filters[0].inner.tci = vlanid;
+ vlan_list.filters[0].inner.tpid = RTE_ETHER_TYPE_VLAN;
+ }
+
+ args.ops = add ? VIRTCHNL_OP_ADD_VLAN_V2 : VIRTCHNL_OP_DEL_VLAN_V2;
+ args.in_args = (uint8_t *)&vlan_list;
+ args.in_args_size = sizeof(vlan_list);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+ err = iavf_execute_vf_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_VLAN_V2" : "OP_DEL_VLAN_V2");
+
+ return err;
+}
+
int
iavf_fdir_add(struct iavf_adapter *adapter,
struct iavf_fdir_conf *filter)
--
2.29.2
^ permalink raw reply [flat|nested] 5+ messages in thread