From: Jie Wang <jie1x.wang@intel.com>
To: dev@dpdk.org
Cc: stevex.yang@intel.com, qi.z.zhang@intel.com, qiming.yang@intel.com,
 jingjing.wu@intel.com, beilei.xing@intel.com, Jie Wang <jie1x.wang@intel.com>
Subject: [PATCH] net/iavf: support raw packet for flow subscription
Date: Sun, 9 Oct 2022 17:00:37 +0800
Message-Id: <20221009090037.78015-1-jie1x.wang@intel.com>

Add Protocol Agnostic Flow (raw flow) support for flow subscription
in AVF.
For example, testpmd creates a flow subscription raw packet rule:

rule: eth + ipv4 src is 1.1.1.1 dst is 2.2.2.2

cmd: flow create 0 ingress pattern raw pattern spec \
00000000000000000000000008004500001400000000000000000101010102020202 \
pattern mask \
0000000000000000000000000000000000000000000000000000FFFFFFFFFFFFFFFF \
/ end actions port_representor port_id 0 / passthru / end

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf_fsub.c | 99 +++++++++++++++++++++++++++++++-----
 1 file changed, 87 insertions(+), 12 deletions(-)

diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 3be75923a5..ee162f575f 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -57,6 +57,7 @@ static struct iavf_flow_parser iavf_fsub_parser;
 
 static struct iavf_pattern_match_item iavf_fsub_pattern_list[] = {
+	{iavf_pattern_raw,			IAVF_INSET_NONE,		IAVF_INSET_NONE},
 	{iavf_pattern_ethertype,		IAVF_SW_INSET_ETHER,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,			IAVF_SW_INSET_MAC_IPV4,		IAVF_INSET_NONE},
 	{iavf_pattern_eth_vlan_ipv4,		IAVF_SW_INSET_MAC_VLAN_IPV4,	IAVF_INSET_NONE},
@@ -153,6 +154,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 {
 	struct virtchnl_proto_hdrs *hdrs = &filter->sub_fltr.proto_hdrs;
 	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_eth *eth_spec, *eth_mask;
 	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
 	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
@@ -164,20 +166,83 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 	uint64_t outer_input_set = IAVF_INSET_NONE;
 	uint64_t *input = NULL;
 	uint16_t input_set_byte = 0;
-	uint16_t j;
+	uint8_t item_num = 0;
 	uint32_t layer = 0;
 
-	for (item = pattern; item->type !=
-	     RTE_FLOW_ITEM_TYPE_END; item++) {
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
 					   RTE_FLOW_ERROR_TYPE_ITEM, item,
 					   "Not support range");
-			return false;
+			return -rte_errno;
 		}
+
 		item_type = item->type;
+		item_num++;
 
 		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_RAW: {
+			raw_spec = item->spec;
+			raw_mask = item->mask;
+
+			if (item_num != 1)
+				return -rte_errno;
+
+			if (raw_spec->length != raw_mask->length)
+				return -rte_errno;
+
+			uint16_t pkt_len = 0;
+			uint16_t tmp_val = 0;
+			uint8_t tmp = 0;
+			int i, j;
+
+			pkt_len = raw_spec->length;
+
+			for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
+				tmp = raw_spec->pattern[i];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val = tmp - 'a' + 10;
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val = tmp - 'A' + 10;
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val = tmp - '0';
+
+				tmp_val *= 16;
+				tmp = raw_spec->pattern[i + 1];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val += (tmp - 'a' + 10);
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val += (tmp - 'A' + 10);
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val += (tmp - '0');
+
+				hdrs->raw.spec[j] = tmp_val;
+
+				tmp = raw_mask->pattern[i];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val = tmp - 'a' + 10;
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val = tmp - 'A' + 10;
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val = tmp - '0';
+
+				tmp_val *= 16;
+				tmp = raw_mask->pattern[i + 1];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val += (tmp - 'a' + 10);
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val += (tmp - 'A' + 10);
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val += (tmp - '0');
+
+				hdrs->raw.mask[j] = tmp_val;
+			}
+
+			hdrs->raw.pkt_len = pkt_len / 2;
+			hdrs->tunnel_level = 0;
+			hdrs->count = 0;
+			return 0;
+		}
 		case RTE_FLOW_ITEM_TYPE_ETH:
 			eth_spec = item->spec;
 			eth_mask = item->mask;
@@ -236,7 +301,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item, "Invalid IPv4 mask.");
-				return false;
+				return -rte_errno;
 			}
 
 			if (ipv4_mask->hdr.src_addr) {
@@ -268,7 +333,9 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 
 			hdrs->count = ++layer;
 			break;
-		case RTE_FLOW_ITEM_TYPE_IPV6:
+		case RTE_FLOW_ITEM_TYPE_IPV6: {
+			int j;
+
 			ipv6_spec = item->spec;
 			ipv6_mask = item->mask;
@@ -283,7 +350,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item, "Invalid IPv6 mask");
-				return false;
+				return -rte_errno;
 			}
 
 			for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
@@ -329,6 +396,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 
 			hdrs->count = ++layer;
 			break;
+		}
 		case RTE_FLOW_ITEM_TYPE_UDP:
 			udp_spec = item->spec;
 			udp_mask = item->mask;
@@ -345,7 +413,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item, "Invalid UDP mask");
-				return false;
+				return -rte_errno;
 			}
 
 			if (udp_mask->hdr.src_port) {
@@ -386,7 +454,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   item, "Invalid TCP mask");
-				return false;
+				return -rte_errno;
 			}
 
 			if (tcp_mask->hdr.src_port) {
@@ -425,9 +493,8 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 			if (vlan_mask->inner_type) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
-						   item,
-						   "Invalid VLAN input set.");
-				return false;
+						   item, "Invalid VLAN input set.");
+				return -rte_errno;
 			}
 
 			rte_memcpy(hdr->buffer_spec, &vlan_spec->hdr,
@@ -494,6 +561,13 @@ iavf_fsub_parse_action(struct iavf_adapter *ad,
 			if (rule_port_id != act_ethdev->port_id)
 				goto error1;
 
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
 			filter->sub_fltr.actions.count = ++num;
 			break;
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
@@ -615,6 +689,7 @@ iavf_fsub_check_action(const struct rte_flow_action *actions,
 			vf_valid = true;
 			actions_num++;
 			break;
+		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
 		case RTE_FLOW_ACTION_TYPE_RSS:
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
 			queue_valid = true;
-- 
2.25.1
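
Note on the hex decoding in the RTE_FLOW_ITEM_TYPE_RAW case above: the patch
converts the testpmd pattern string two ASCII hex characters at a time into
one packet byte (high nibble * 16 + low nibble). The standalone C sketch below
restates that arithmetic for reference only; hex_nibble() and
decode_hex_pattern() are hypothetical names used for illustration and are not
part of this patch or the iavf driver.

#include <stdint.h>
#include <stdio.h>

/* Map one ASCII hex digit to its 4-bit value; return -1 on bad input. */
static int
hex_nibble(uint8_t c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

/* Decode "len" ASCII hex characters into len/2 bytes, two chars per byte,
 * mirroring the per-nibble conversion done inline in the RAW case.
 */
static int
decode_hex_pattern(const uint8_t *pattern, uint16_t len, uint8_t *out)
{
	uint16_t i, j;

	for (i = 0, j = 0; i + 1 < len; i += 2, j++) {
		int hi = hex_nibble(pattern[i]);
		int lo = hex_nibble(pattern[i + 1]);

		if (hi < 0 || lo < 0)
			return -1;
		out[j] = (uint8_t)(hi * 16 + lo);
	}
	return 0;
}

int
main(void)
{
	/* First spec bytes from the example rule above: EtherType 0x0800
	 * followed by the IPv4 version/IHL byte 0x45.
	 */
	const uint8_t spec[] = "080045";
	uint8_t buf[(sizeof(spec) - 1) / 2];

	if (decode_hex_pattern(spec, sizeof(spec) - 1, buf) == 0)
		printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);
	return 0;
}

Run on the example rule, this prints "08 00 45", which matches the start of
the decoded spec that the patch stores in hdrs->raw.spec before setting
hdrs->raw.pkt_len to half the string length.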