From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 010A5A00C3; Sun, 7 Jun 2020 18:41:27 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 565F51B6B4; Sun, 7 Jun 2020 18:41:27 +0200 (CEST) Received: from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com [67.231.156.173]) by dpdk.org (Postfix) with ESMTP id C123537AF for ; Sun, 7 Jun 2020 18:41:25 +0200 (CEST) Received: from pps.filterd (m0045851.ppops.net [127.0.0.1]) by mx0b-0016f401.pphosted.com (8.16.0.42/8.16.0.42) with SMTP id 057GfMTS004978; Sun, 7 Jun 2020 09:41:22 -0700 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com; h=from : to : cc : subject : date : message-id : mime-version : content-type; s=pfpt0818; bh=80pwl3u5vcDB66ZWResXUzjYOC+IPXT1YN1RzW/NHX0=; b=aaDh69SkqhLxpQcnguShOJFlfx/jaKEsIPF33PFU3XOReEzQz73vbePN8EZstFHoGZ+8 mpaE2RbRFIpdNSIdBtMytJQH3VAzmIGtOwfjzg/sjl/DQCASfNAmfuXDNE0fvMp6E8zW mXQB6GuSKxT0pg709OP9SJxYfJxFy/BI1eJRyg54nZ+3NRk3hhUKw9YlzEHiyjfhhtd0 ZepDq9vAiLbkNHpN8Yo04/yzGRW6SZ695wCSsZW92kcbRi1n1Hw7yKBdclnmx14pxHuN I4QtgsZzDtOBXtN+1bctTBdmNNQAbOqbrNoHn5r51A+aWNNom18vK3kQ6zUhD5vY8JsX vQ== Received: from sc-exch02.marvell.com ([199.233.58.182]) by mx0b-0016f401.pphosted.com with ESMTP id 31gann2u7p-1 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); Sun, 07 Jun 2020 09:41:21 -0700 Received: from DC5-EXCH02.marvell.com (10.69.176.39) by SC-EXCH02.marvell.com (10.93.176.82) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Sun, 7 Jun 2020 09:41:20 -0700 Received: from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH02.marvell.com (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.2; Sun, 7 Jun 2020 09:41:20 -0700 Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.2 via Frontend Transport; Sun, 7 Jun 2020 09:41:20 -0700 
Received: from hyd1588t430.marvell.com (unknown [10.29.52.204]) by maili.marvell.com (Postfix) with ESMTP id CBFDB3F7041; Sun, 7 Jun 2020 09:41:16 -0700 (PDT) From: Nithin Dabilpuram To: , , , , , Nithin Dabilpuram , Pavan Nikhilesh CC: , Andrzej Ostruszka , Date: Sun, 7 Jun 2020 22:10:42 +0530 Message-ID: <20200607164042.24910-1-ndabilpuram@marvell.com> X-Mailer: git-send-email 2.8.4 MIME-Version: 1.0 Content-Type: text/plain X-Proofpoint-Virus-Version: vendor=fsecure engine=2.50.10434:6.0.216, 18.0.687 definitions=2020-06-07_10:2020-06-04, 2020-06-07 signatures=0 Subject: [dpdk-dev] [PATCH] node: add pkt classifier node X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This node classifies pkts based on packet type and sends them to appropriate next node. This node helps in distribution of packets from ethdev_rx node to different next nodes with a constant overhead for all packet types. Currently all except non fragmented IPV4 packets are marked to be sent to "pkt_drop" node. Performance difference on ARM64 Octeontx2 is -4.9% due to addition of new node in the path. 
Signed-off-by: Nithin Dabilpuram --- lib/librte_node/Makefile | 1 + lib/librte_node/ethdev_rx.c | 20 +++- lib/librte_node/ethdev_rx_priv.h | 2 + lib/librte_node/ip4_lookup_neon.h | 1 - lib/librte_node/meson.build | 2 +- lib/librte_node/node_private.h | 2 + lib/librte_node/pkt_cls.c | 225 ++++++++++++++++++++++++++++++++++++++ lib/librte_node/pkt_cls_priv.h | 27 +++++ 8 files changed, 273 insertions(+), 7 deletions(-) create mode 100644 lib/librte_node/pkt_cls.c create mode 100644 lib/librte_node/pkt_cls_priv.h diff --git a/lib/librte_node/Makefile b/lib/librte_node/Makefile index 9dee8b4..3ec0210 100644 --- a/lib/librte_node/Makefile +++ b/lib/librte_node/Makefile @@ -23,6 +23,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_NODE) += ethdev_tx.c SRCS-$(CONFIG_RTE_LIBRTE_NODE) += ethdev_ctrl.c SRCS-$(CONFIG_RTE_LIBRTE_NODE) += ip4_lookup.c SRCS-$(CONFIG_RTE_LIBRTE_NODE) += ip4_rewrite.c +SRCS-$(CONFIG_RTE_LIBRTE_NODE) += pkt_cls.c SRCS-$(CONFIG_RTE_LIBRTE_NODE) += pkt_drop.c # install header files diff --git a/lib/librte_node/ethdev_rx.c b/lib/librte_node/ethdev_rx.c index 5cc7365..4c23961 100644 --- a/lib/librte_node/ethdev_rx.c +++ b/lib/librte_node/ethdev_rx.c @@ -16,9 +16,14 @@ static struct ethdev_rx_node_main ethdev_rx_main; static __rte_always_inline uint16_t ethdev_rx_node_process_inline(struct rte_graph *graph, struct rte_node *node, - uint16_t port, uint16_t queue) + ethdev_rx_node_ctx_t *ctx) { - uint16_t count, next_index = ETHDEV_RX_NEXT_IP4_LOOKUP; + uint16_t count, next_index; + uint16_t port, queue; + + port = ctx->port_id; + queue = ctx->queue_id; + next_index = ctx->cls_next; /* Get pkts from port */ count = rte_eth_rx_burst(port, queue, (struct rte_mbuf **)node->objs, @@ -43,8 +48,7 @@ ethdev_rx_node_process(struct rte_graph *graph, struct rte_node *node, RTE_SET_USED(objs); RTE_SET_USED(cnt); - n_pkts = ethdev_rx_node_process_inline(graph, node, ctx->port_id, - ctx->queue_id); + n_pkts = ethdev_rx_node_process_inline(graph, node, ctx); return n_pkts; } @@ -191,6 
+195,8 @@ ethdev_rx_node_init(const struct rte_graph *graph, struct rte_node *node) RTE_VERIFY(elem != NULL); + ctx->cls_next = ETHDEV_RX_NEXT_PKT_CLS; + /* Check and setup ptype */ return ethdev_ptype_setup(ctx->port_id, ctx->queue_id); } @@ -209,7 +215,11 @@ static struct rte_node_register ethdev_rx_node_base = { .init = ethdev_rx_node_init, .nb_edges = ETHDEV_RX_NEXT_MAX, - .next_nodes = {[ETHDEV_RX_NEXT_IP4_LOOKUP] = "ip4_lookup"}, + .next_nodes = { + /* Default pkt classification node */ + [ETHDEV_RX_NEXT_PKT_CLS] = "pkt_cls", + [ETHDEV_RX_NEXT_IP4_LOOKUP] = "ip4_lookup", + }, }; struct rte_node_register * diff --git a/lib/librte_node/ethdev_rx_priv.h b/lib/librte_node/ethdev_rx_priv.h index 2d7195a..efcd66d 100644 --- a/lib/librte_node/ethdev_rx_priv.h +++ b/lib/librte_node/ethdev_rx_priv.h @@ -23,6 +23,7 @@ typedef struct ethdev_rx_node_ctx ethdev_rx_node_ctx_t; struct ethdev_rx_node_ctx { uint16_t port_id; /**< Port identifier of the Rx node. */ uint16_t queue_id; /**< Queue identifier of the Rx node. */ + uint16_t cls_next; }; /** @@ -41,6 +42,7 @@ struct ethdev_rx_node_elem { enum ethdev_rx_next_nodes { ETHDEV_RX_NEXT_IP4_LOOKUP, + ETHDEV_RX_NEXT_PKT_CLS, ETHDEV_RX_NEXT_MAX, }; diff --git a/lib/librte_node/ip4_lookup_neon.h b/lib/librte_node/ip4_lookup_neon.h index dd21cb2..5e5a7d8 100644 --- a/lib/librte_node/ip4_lookup_neon.h +++ b/lib/librte_node/ip4_lookup_neon.h @@ -37,7 +37,6 @@ ip4_lookup_node_process(struct rte_graph *graph, struct rte_node *node, from = objs; n_left_from = nb_objs; -#define OBJS_PER_CLINE (RTE_CACHE_LINE_SIZE / sizeof(void *)) for (i = OBJS_PER_CLINE; i < RTE_GRAPH_BURST_SIZE; i += OBJS_PER_CLINE) rte_prefetch0(&objs[i]); diff --git a/lib/librte_node/meson.build b/lib/librte_node/meson.build index 17f1316..3d582f6 100644 --- a/lib/librte_node/meson.build +++ b/lib/librte_node/meson.build @@ -2,7 +2,7 @@ # Copyright(C) 2020 Marvell International Ltd. 
sources = files('null.c', 'log.c', 'ethdev_rx.c', 'ethdev_tx.c', 'ip4_lookup.c', - 'ip4_rewrite.c', 'pkt_drop.c', 'ethdev_ctrl.c') + 'ip4_rewrite.c', 'pkt_drop.c', 'ethdev_ctrl.c', 'pkt_cls.c') headers = files('rte_node_ip4_api.h', 'rte_node_eth_api.h') # Strict-aliasing rules are violated by uint8_t[] to context size casts. cflags += '-fno-strict-aliasing' diff --git a/lib/librte_node/node_private.h b/lib/librte_node/node_private.h index 975b9aa..ab7941c 100644 --- a/lib/librte_node/node_private.h +++ b/lib/librte_node/node_private.h @@ -46,6 +46,8 @@ struct node_mbuf_priv2 { #define NODE_MBUF_PRIV2_SIZE sizeof(struct node_mbuf_priv2) +#define OBJS_PER_CLINE (RTE_CACHE_LINE_SIZE / sizeof(void *)) + /** * Get mbuf_priv1 pointer from rte_mbuf. * diff --git a/lib/librte_node/pkt_cls.c b/lib/librte_node/pkt_cls.c new file mode 100644 index 0000000..b95454d --- /dev/null +++ b/lib/librte_node/pkt_cls.c @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (C) 2020 Marvell. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "pkt_cls_priv.h" +#include "node_private.h" + +/* Next node for each ptype, default is '0' is "pkt_drop" */ +static const uint8_t p_nxt[256] __rte_cache_aligned = { + [RTE_PTYPE_L3_IPV4] = PKT_CLS_NEXT_IP4_LOOKUP, + + [RTE_PTYPE_L3_IPV4_EXT] = PKT_CLS_NEXT_IP4_LOOKUP, + + [RTE_PTYPE_L3_IPV4_EXT_UNKNOWN] = PKT_CLS_NEXT_IP4_LOOKUP, + + [RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER] = + PKT_CLS_NEXT_IP4_LOOKUP, + + [RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L2_ETHER] = + PKT_CLS_NEXT_IP4_LOOKUP, + + [RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L2_ETHER] = + PKT_CLS_NEXT_IP4_LOOKUP, +}; + +static uint16_t +pkt_cls_node_process(struct rte_graph *graph, struct rte_node *node, + void **objs, uint16_t nb_objs) +{ + struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts; + uint8_t l0, l1, l2, l3, last_type; + uint16_t next_index, n_left_from; + uint16_t held = 0, last_spec = 0; + struct pkt_cls_node_ctx *ctx; + void **to_next, **from; + uint32_t i; + + pkts = (struct rte_mbuf **)objs; + from = objs; + n_left_from = nb_objs; + + for (i = OBJS_PER_CLINE; i < RTE_GRAPH_BURST_SIZE; i += OBJS_PER_CLINE) + rte_prefetch0(&objs[i]); + +#if RTE_GRAPH_BURST_SIZE > 64 + for (i = 0; i < 4 && i < n_left_from; i++) + rte_prefetch0(pkts[i]); +#endif + + ctx = (struct pkt_cls_node_ctx *)node->ctx; + last_type = ctx->l2l3_type; + next_index = p_nxt[last_type]; + + /* Get stream for the speculated next node */ + to_next = rte_node_next_stream_get(graph, node, + next_index, nb_objs); + while (n_left_from >= 4) { +#if RTE_GRAPH_BURST_SIZE > 64 + if (likely(n_left_from > 7)) { + rte_prefetch0(pkts[4]); + rte_prefetch0(pkts[5]); + rte_prefetch0(pkts[6]); + rte_prefetch0(pkts[7]); + } +#endif + + mbuf0 = pkts[0]; + mbuf1 = pkts[1]; + mbuf2 = pkts[2]; + mbuf3 = pkts[3]; + pkts += 4; + n_left_from -= 4; + + l0 = mbuf0->packet_type & + (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK); + l1 = mbuf1->packet_type & + (RTE_PTYPE_L2_MASK | 
RTE_PTYPE_L3_MASK); + l2 = mbuf2->packet_type & + (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK); + l3 = mbuf3->packet_type & + (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK); + + /* Check if they are destined to same + * next node based on l2l3 packet type. + */ + uint8_t fix_spec = (last_type ^ l0) | (last_type ^ l1) | + (last_type ^ l2) | (last_type ^ l3); + + if (unlikely(fix_spec)) { + /* Copy things successfully speculated till now */ + rte_memcpy(to_next, from, + last_spec * sizeof(from[0])); + from += last_spec; + to_next += last_spec; + held += last_spec; + last_spec = 0; + + /* l0 */ + if (p_nxt[l0] == next_index) { + to_next[0] = from[0]; + to_next++; + held++; + } else { + rte_node_enqueue_x1(graph, node, + p_nxt[l0], from[0]); + } + + /* l1 */ + if (p_nxt[l1] == next_index) { + to_next[0] = from[1]; + to_next++; + held++; + } else { + rte_node_enqueue_x1(graph, node, + p_nxt[l1], from[1]); + } + + /* l2 */ + if (p_nxt[l2] == next_index) { + to_next[0] = from[2]; + to_next++; + held++; + } else { + rte_node_enqueue_x1(graph, node, + p_nxt[l2], from[2]); + } + + /* l3 */ + if (p_nxt[l3] == next_index) { + to_next[0] = from[3]; + to_next++; + held++; + } else { + rte_node_enqueue_x1(graph, node, + p_nxt[l3], from[3]); + } + + /* Update speculated ptype */ + if ((last_type != l3) && (l2 == l3) && + (next_index != p_nxt[l3])) { + /* Put the current stream for + * speculated ltype. 
+ */ + rte_node_next_stream_put(graph, node, + next_index, held); + + held = 0; + + /* Get next stream for new ltype */ + next_index = p_nxt[l3]; + last_type = l3; + to_next = rte_node_next_stream_get(graph, node, + next_index, + nb_objs); + } else if (next_index == p_nxt[l3]) { + last_type = l3; + } + + from += 4; + } else { + last_spec += 4; + } + } + + while (n_left_from > 0) { + mbuf0 = pkts[0]; + + pkts += 1; + n_left_from -= 1; + + l0 = mbuf0->packet_type & + (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK); + if (unlikely((l0 != last_type) && + (p_nxt[l0] != next_index))) { + /* Copy things successfully speculated till now */ + rte_memcpy(to_next, from, + last_spec * sizeof(from[0])); + from += last_spec; + to_next += last_spec; + held += last_spec; + last_spec = 0; + + rte_node_enqueue_x1(graph, node, + p_nxt[l0], from[0]); + from += 1; + } else { + last_spec += 1; + } + } + + /* !!! Home run !!! */ + if (likely(last_spec == nb_objs)) { + rte_node_next_stream_move(graph, node, next_index); + return nb_objs; + } + + held += last_spec; + /* Copy things successfully speculated till now */ + rte_memcpy(to_next, from, last_spec * sizeof(from[0])); + rte_node_next_stream_put(graph, node, next_index, held); + + ctx->l2l3_type = last_type; + return nb_objs; +} + +/* Packet Classification Node */ +struct rte_node_register pkt_cls_node = { + .process = pkt_cls_node_process, + .name = "pkt_cls", + + .nb_edges = PKT_CLS_NEXT_MAX, + .next_nodes = { + /* Pkt drop node starts at '0' */ + [PKT_CLS_NEXT_PKT_DROP] = "pkt_drop", + [PKT_CLS_NEXT_IP4_LOOKUP] = "ip4_lookup", + }, +}; +RTE_NODE_REGISTER(pkt_cls_node); diff --git a/lib/librte_node/pkt_cls_priv.h b/lib/librte_node/pkt_cls_priv.h new file mode 100644 index 0000000..3f2bb04 --- /dev/null +++ b/lib/librte_node/pkt_cls_priv.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (C) 2020 Marvell. 
+ */ +#ifndef __INCLUDE_PKT_CLS_PRIV_H__ +#define __INCLUDE_PKT_CLS_PRIV_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +struct pkt_cls_node_ctx { + uint16_t l2l3_type; +}; + +enum pkt_cls_next_nodes { + PKT_CLS_NEXT_PKT_DROP, + PKT_CLS_NEXT_IP4_LOOKUP, + PKT_CLS_NEXT_MAX, +}; + +#ifdef __cplusplus +} +#endif + +#endif /* __INCLUDE_PKT_CLS_PRIV_H__ */ -- 2.8.4