From: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
To: dev@dpdk.org
Cc: Konstantin Ananyev
Subject: [PATCH 2/2] examples/l3fwd: avoid packets reorder in ACL mode
Date: Thu, 2 May 2024 16:28:16 +0100
Message-Id: <20240502152816.65562-3-konstantin.v.ananyev@yandex.ru>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20240502152816.65562-1-konstantin.v.ananyev@yandex.ru>
References: <20240502152816.65562-1-konstantin.v.ananyev@yandex.ru>
List-Id: DPDK patches and discussions

In ACL mode, l3fwd first does classify() and send() for IPv4 packets and
then repeats the same procedure for IPv6. That might cause packet
reordering within one ingress queue. Probably not a big deal, as the
order within each flow is still preserved, but it is better to avoid it
anyway, especially considering that the other modes (lpm, fib, em)
preserve packet order regardless of the packet's IP version.

This patch makes ACL mode behave in the same manner and preserve packet
order within the same ingress queue. These changes also allow ACL mode
to use the common (and hopefully better optimized) send_packets_multi()
function on the TX path.

Signed-off-by: Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
---
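[Editor's note, not part of the patch: the stand-alone toy program below
(plain C, no DPDK dependencies; all names are illustrative) sketches the
merge step that acl_process_pkts() performs in this patch.
rte_acl_classify() is run once per IP version, so its results come back
grouped by version; the loop below writes them into a single hops[]
array indexed by each packet's original position in the RX burst, which
lets the whole burst be transmitted once, in RX order. The ACL deny-bit
check of the real code is reduced to a simple non-zero test here.]

#include <stdint.h>
#include <stdio.h>

enum { TYPE_NONE, TYPE_IPV4, TYPE_IPV6 };

#define BAD_PORT	UINT16_MAX
#define PORT_SHIFT	1	/* stand-in for l3fwd's FWD_PORT_SHIFT */

/*
 * Merge per-IP-version classification results back into one hops[]
 * array indexed by each packet's original position in the RX burst,
 * the same way acl_process_pkts() in this patch does.
 */
static void
merge_results(const uint8_t types[], const uint32_t res4[],
		const uint32_t res6[], uint16_t hops[], uint32_t num)
{
	uint32_t i, n4 = 0, n6 = 0, res;

	for (i = 0; i != num; i++) {
		switch (types[i]) {
		case TYPE_IPV4:
			res = res4[n4++];
			break;
		case TYPE_IPV6:
			res = res6[n6++];
			break;
		default:
			res = 0;	/* unknown type -> drop */
		}
		hops[i] = (res != 0) ? (uint16_t)(res - PORT_SHIFT) : BAD_PORT;
	}
}

int
main(void)
{
	/* a mixed burst: v4, v6, v6, v4 -- results arrive grouped by version */
	const uint8_t types[] = { TYPE_IPV4, TYPE_IPV6, TYPE_IPV6, TYPE_IPV4 };
	const uint32_t res4[] = { 1, 2 };
	const uint32_t res6[] = { 3, 4 };
	uint16_t hops[4];
	uint32_t i;

	merge_results(types, res4, res6, hops, 4);
	for (i = 0; i != 4; i++)
		printf("pkt %u -> hop %u\n", (unsigned int)i, (unsigned int)hops[i]);
	return 0;
}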
 examples/l3fwd/l3fwd_acl.c        | 125 +++++++++++++++++++-----------
 examples/l3fwd/l3fwd_acl_scalar.h |  71 +++++++++--------
 2 files changed, 118 insertions(+), 78 deletions(-)

diff --git a/examples/l3fwd/l3fwd_acl.c b/examples/l3fwd/l3fwd_acl.c
index d9e4ae543f..ab8222c9db 100644
--- a/examples/l3fwd/l3fwd_acl.c
+++ b/examples/l3fwd/l3fwd_acl.c
@@ -235,18 +235,6 @@ enum {
 RTE_ACL_RULE_DEF(acl4_rule, RTE_DIM(ipv4_defs));
 RTE_ACL_RULE_DEF(acl6_rule, RTE_DIM(ipv6_defs));
 
-struct acl_search_t {
-	const uint8_t *data_ipv4[MAX_PKT_BURST];
-	struct rte_mbuf *m_ipv4[MAX_PKT_BURST];
-	uint32_t res_ipv4[MAX_PKT_BURST];
-	int num_ipv4;
-
-	const uint8_t *data_ipv6[MAX_PKT_BURST];
-	struct rte_mbuf *m_ipv6[MAX_PKT_BURST];
-	uint32_t res_ipv6[MAX_PKT_BURST];
-	int num_ipv6;
-};
-
 static struct {
 	struct rte_acl_ctx *acx_ipv4[NB_SOCKETS];
 	struct rte_acl_ctx *acx_ipv6[NB_SOCKETS];
@@ -988,11 +976,86 @@ setup_acl(const int socket_id)
 
 }
 
+static inline void
+dump_denied_pkt(const struct rte_mbuf *pkt, uint32_t res)
+{
+#ifdef L3FWDACL_DEBUG
+	if ((res & ACL_DENY_SIGNATURE) != 0) {
+		if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type))
+			dump_acl4_rule(pkt, res);
+		else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type))
+			dump_acl6_rule(pkt, res);
+	}
+#else
+	RTE_SET_USED(pkt);
+	RTE_SET_USED(res);
+#endif
+}
+
+static inline void
+acl_process_pkts(struct rte_mbuf *pkts[MAX_PKT_BURST],
+	uint16_t hops[MAX_PKT_BURST], uint32_t num, int32_t socketid)
+{
+	uint32_t i, n4, n6, res;
+	struct acl_search_t acl_search;
+
+	/* split the packet burst depending on packet type (IPv4/IPv6) */
+	l3fwd_acl_prepare_acl_parameter(pkts, &acl_search, num);
+
+	if (acl_search.num_ipv4)
+		rte_acl_classify(acl_config.acx_ipv4[socketid],
+			acl_search.data_ipv4,
+			acl_search.res_ipv4,
+			acl_search.num_ipv4,
+			DEFAULT_MAX_CATEGORIES);
+
+	if (acl_search.num_ipv6)
+		rte_acl_classify(acl_config.acx_ipv6[socketid],
+			acl_search.data_ipv6,
+			acl_search.res_ipv6,
+			acl_search.num_ipv6,
+			DEFAULT_MAX_CATEGORIES);
+
+	/* combine lookup results back into one array of next hops */
+	n4 = 0;
+	n6 = 0;
+	for (i = 0; i != num; i++) {
+		switch (acl_search.types[i]) {
+		case TYPE_IPV4:
+			res = acl_search.res_ipv4[n4++];
+			break;
+		case TYPE_IPV6:
+			res = acl_search.res_ipv6[n6++];
+			break;
+		default:
+			res = 0;
+		}
+		if (likely((res & ACL_DENY_SIGNATURE) == 0 && res != 0))
+			hops[i] = res - FWD_PORT_SHIFT;
+		else {
+			hops[i] = BAD_PORT;
+			dump_denied_pkt(pkts[i], res);
+		}
+	}
+}
+
+static inline void
+acl_send_packets(struct lcore_conf *qconf, struct rte_mbuf *pkts[],
+	uint16_t hops[], uint32_t num)
+{
+#if defined ACL_SEND_MULTI
+	send_packets_multi(qconf, pkts, hops, num);
+#else
+	send_packets_single(qconf, pkts, hops, num);
+#endif
+}
+
 /* main processing loop */
 int
 acl_main_loop(__rte_unused void *dummy)
 {
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+	uint16_t hops[MAX_PKT_BURST];
 	unsigned int lcore_id;
 	uint64_t prev_tsc, diff_tsc, cur_tsc;
 	int i, nb_rx;
@@ -1048,7 +1111,7 @@ acl_main_loop(__rte_unused void *dummy)
 		}
 
 		/*
-		 * Read packet from RX queues
+		 * Read packets from RX queues and process them
 		 */
 		for (i = 0; i < qconf->n_rx_queue; ++i) {
 
@@ -1058,40 +1121,10 @@ acl_main_loop(__rte_unused void *dummy)
 				pkts_burst, MAX_PKT_BURST);
 
 			if (nb_rx > 0) {
-				struct acl_search_t acl_search;
-
-				l3fwd_acl_prepare_acl_parameter(pkts_burst, &acl_search,
+				acl_process_pkts(pkts_burst, hops, nb_rx,
+					socketid);
+				acl_send_packets(qconf, pkts_burst, hops,
 					nb_rx);
-
-				if (acl_search.num_ipv4) {
-					rte_acl_classify(
-						acl_config.acx_ipv4[socketid],
-						acl_search.data_ipv4,
-						acl_search.res_ipv4,
-						acl_search.num_ipv4,
-						DEFAULT_MAX_CATEGORIES);
-
-					l3fwd_acl_send_packets(
-						qconf,
-						acl_search.m_ipv4,
-						acl_search.res_ipv4,
-						acl_search.num_ipv4);
-				}
-
-				if (acl_search.num_ipv6) {
-					rte_acl_classify(
-						acl_config.acx_ipv6[socketid],
-						acl_search.data_ipv6,
-						acl_search.res_ipv6,
-						acl_search.num_ipv6,
-						DEFAULT_MAX_CATEGORIES);
-
-					l3fwd_acl_send_packets(
-						qconf,
-						acl_search.m_ipv6,
-						acl_search.res_ipv6,
-						acl_search.num_ipv6);
-				}
 			}
 		}
 	}
diff --git a/examples/l3fwd/l3fwd_acl_scalar.h b/examples/l3fwd/l3fwd_acl_scalar.h
index 542c303d3b..cb22bb49aa 100644
--- a/examples/l3fwd/l3fwd_acl_scalar.h
+++ b/examples/l3fwd/l3fwd_acl_scalar.h
@@ -6,7 +6,40 @@
 #define L3FWD_ACL_SCALAR_H
 
 #include "l3fwd.h"
+#if defined RTE_ARCH_X86
+#include "l3fwd_sse.h"
+#elif defined __ARM_NEON
+#include "l3fwd_neon.h"
+#elif defined RTE_ARCH_PPC_64
+#include "l3fwd_altivec.h"
+#else
 #include "l3fwd_common.h"
+#endif
+/*
+ * If the machine has SSE, NEON or PPC 64 support, then multiple packets
+ * can be sent at once; otherwise packets are sent one at a time.
+ */
+#if defined RTE_ARCH_X86 || defined __ARM_NEON || defined RTE_ARCH_PPC_64
+#define ACL_SEND_MULTI
+#endif
+
+#define TYPE_NONE	0
+#define TYPE_IPV4	1
+#define TYPE_IPV6	2
+
+struct acl_search_t {
+
+	uint32_t num_ipv4;
+	uint32_t num_ipv6;
+
+	uint8_t types[MAX_PKT_BURST];
+
+	const uint8_t *data_ipv4[MAX_PKT_BURST];
+	uint32_t res_ipv4[MAX_PKT_BURST];
+
+	const uint8_t *data_ipv6[MAX_PKT_BURST];
+	uint32_t res_ipv6[MAX_PKT_BURST];
+};
 
 static inline void
 l3fwd_acl_prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search_t *acl,
@@ -16,16 +49,16 @@ l3fwd_acl_prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search_t *acl
 
 	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
 		/* Fill acl structure */
-		acl->data_ipv4[acl->num_ipv4] = MBUF_IPV4_2PROTO(pkt);
-		acl->m_ipv4[(acl->num_ipv4)++] = pkt;
+		acl->data_ipv4[acl->num_ipv4++] = MBUF_IPV4_2PROTO(pkt);
+		acl->types[index] = TYPE_IPV4;
 
 	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
 		/* Fill acl structure */
-		acl->data_ipv6[acl->num_ipv6] = MBUF_IPV6_2PROTO(pkt);
-		acl->m_ipv6[(acl->num_ipv6)++] = pkt;
+		acl->data_ipv6[acl->num_ipv6++] = MBUF_IPV6_2PROTO(pkt);
+		acl->types[index] = TYPE_IPV6;
 	} else {
-		/* Unknown type, drop the packet */
-		rte_pktmbuf_free(pkt);
+		/* Unknown type, will drop the packet */
+		acl->types[index] = TYPE_NONE;
 	}
 }
 
@@ -80,30 +113,4 @@ send_packets_single(struct lcore_conf *qconf, struct rte_mbuf *pkts[], uint16_t
 	}
 }
 
-static inline void
-l3fwd_acl_send_packets(struct lcore_conf *qconf, struct rte_mbuf *pkts[], uint32_t res[],
-		uint32_t nb_tx)
-{
-	uint32_t i;
-	uint16_t dst_port[nb_tx];
-
-	for (i = 0; i != nb_tx; i++) {
-		if (likely((res[i] & ACL_DENY_SIGNATURE) == 0 && res[i] != 0)) {
-			dst_port[i] = res[i] - FWD_PORT_SHIFT;
-		} else {
-			dst_port[i] = BAD_PORT;
-#ifdef L3FWDACL_DEBUG
-			if ((res & ACL_DENY_SIGNATURE) != 0) {
-				if (RTE_ETH_IS_IPV4_HDR(pkts[i]->packet_type))
-					dump_acl4_rule(pkts[i], res[i]);
-				else if (RTE_ETH_IS_IPV6_HDR(pkt[i]->packet_type))
-					dump_acl6_rule(pkt[i], res[i]);
-			}
-#endif
-		}
-	}
-
-	send_packets_single(qconf, pkts, dst_port, nb_tx);
-}
-
 #endif /* L3FWD_ACL_SCALAR_H */
-- 
2.35.3