* [PATCH v2 1/2] flow_classify: remove library
2023-08-01 16:04 [PATCH v2 0/2] Remove disabled functionality Stephen Hemminger
@ 2023-08-01 16:04 ` Stephen Hemminger
2023-08-01 16:05 ` [PATCH v2 2/2] kni: remove deprecated kernel network interface Stephen Hemminger
2023-08-04 13:02 ` [PATCH v2 0/2] Remove disabled functionality David Marchand
2 siblings, 0 replies; 9+ messages in thread
From: Stephen Hemminger @ 2023-08-01 16:04 UTC (permalink / raw)
To: dev; +Cc: Stephen Hemminger, Thomas Monjalon, Bruce Richardson
The flow_classify library was marked for removal last year
because there was no maintainer and the functionality is
limited.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
MAINTAINERS | 7 -
app/test/meson.build | 4 -
app/test/test_flow_classify.c | 895 ------------------
app/test/test_flow_classify.h | 26 -
doc/api/doxy-api-index.md | 1 -
doc/api/doxy-api.conf.in | 1 -
doc/guides/freebsd_gsg/build_sample_apps.rst | 2 +-
doc/guides/freebsd_gsg/install_from_ports.rst | 2 +-
doc/guides/prog_guide/flow_classify_lib.rst | 424 ---------
doc/guides/prog_guide/index.rst | 1 -
doc/guides/rel_notes/deprecation.rst | 8 +-
doc/guides/rel_notes/release_23_11.rst | 2 +
doc/guides/sample_app_ug/flow_classify.rst | 242 -----
doc/guides/sample_app_ug/index.rst | 1 -
examples/flow_classify/Makefile | 51 -
examples/flow_classify/flow_classify.c | 878 -----------------
examples/flow_classify/ipv4_rules_file.txt | 14 -
examples/flow_classify/meson.build | 13 -
examples/meson.build | 1 -
lib/flow_classify/meson.build | 12 -
lib/flow_classify/rte_flow_classify.c | 670 -------------
lib/flow_classify/rte_flow_classify.h | 284 ------
lib/flow_classify/rte_flow_classify_parse.c | 532 -----------
lib/flow_classify/rte_flow_classify_parse.h | 58 --
lib/flow_classify/version.map | 13 -
lib/meson.build | 3 -
meson_options.txt | 2 +-
27 files changed, 6 insertions(+), 4141 deletions(-)
delete mode 100644 app/test/test_flow_classify.c
delete mode 100644 app/test/test_flow_classify.h
delete mode 100644 doc/guides/prog_guide/flow_classify_lib.rst
delete mode 100644 doc/guides/sample_app_ug/flow_classify.rst
delete mode 100644 examples/flow_classify/Makefile
delete mode 100644 examples/flow_classify/flow_classify.c
delete mode 100644 examples/flow_classify/ipv4_rules_file.txt
delete mode 100644 examples/flow_classify/meson.build
delete mode 100644 lib/flow_classify/meson.build
delete mode 100644 lib/flow_classify/rte_flow_classify.c
delete mode 100644 lib/flow_classify/rte_flow_classify.h
delete mode 100644 lib/flow_classify/rte_flow_classify_parse.c
delete mode 100644 lib/flow_classify/rte_flow_classify_parse.h
delete mode 100644 lib/flow_classify/version.map
diff --git a/MAINTAINERS b/MAINTAINERS
index 18bc05fccd0d..dbb25211c367 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1511,13 +1511,6 @@ F: lib/pdcp/
F: doc/guides/prog_guide/pdcp_lib.rst
F: app/test/test_pdcp*
-Flow Classify - EXPERIMENTAL - UNMAINTAINED
-F: lib/flow_classify/
-F: app/test/test_flow_classify*
-F: doc/guides/prog_guide/flow_classify_lib.rst
-F: examples/flow_classify/
-F: doc/guides/sample_app_ug/flow_classify.rst
-
Distributor
M: David Hunt <david.hunt@intel.com>
F: lib/distributor/
diff --git a/app/test/meson.build b/app/test/meson.build
index b89cf0368fb5..90a2e350c7ae 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -359,10 +359,6 @@ if dpdk_conf.has('RTE_EVENT_SKELETON')
test_deps += 'event_skeleton'
endif
-if dpdk_conf.has('RTE_LIB_FLOW_CLASSIFY')
- test_sources += 'test_flow_classify.c'
- fast_tests += [['flow_classify_autotest', false, true]]
-endif
if dpdk_conf.has('RTE_LIB_GRAPH')
test_sources += 'test_graph.c'
fast_tests += [['graph_autotest', true, true]]
diff --git a/app/test/test_flow_classify.c b/app/test/test_flow_classify.c
deleted file mode 100644
index 6e274d88e645..000000000000
--- a/app/test/test_flow_classify.c
+++ /dev/null
@@ -1,895 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
- */
-
-#include <string.h>
-#include <errno.h>
-
-#include "test.h"
-
-#include <rte_string_fns.h>
-#include <rte_mbuf.h>
-#include <rte_byteorder.h>
-#include <rte_ip.h>
-
-#ifdef RTE_EXEC_ENV_WINDOWS
-static int
-test_flow_classify(void)
-{
- printf("flow_classify not supported on Windows, skipping test\n");
- return TEST_SKIPPED;
-}
-
-#else
-
-#include <rte_acl.h>
-#include <rte_common.h>
-#include <rte_table_acl.h>
-#include <rte_flow.h>
-#include <rte_flow_classify.h>
-
-#include "packet_burst_generator.h"
-#include "test_flow_classify.h"
-
-
-#define FLOW_CLASSIFY_MAX_RULE_NUM 100
-#define MAX_PKT_BURST 32
-#define NB_SOCKETS 4
-#define MEMPOOL_CACHE_SIZE 256
-#define MBUF_SIZE 512
-#define NB_MBUF 512
-
-/* test UDP, TCP and SCTP packets */
-static struct rte_mempool *mbufpool[NB_SOCKETS];
-static struct rte_mbuf *bufs[MAX_PKT_BURST];
-
-static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
- /* first input field - always one byte long. */
- {
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint8_t),
- .field_index = PROTO_FIELD_IPV4,
- .input_index = PROTO_INPUT_IPV4,
- .offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct rte_ipv4_hdr, next_proto_id),
- },
- /* next input field (IPv4 source address) - 4 consecutive bytes. */
- {
- /* rte_flow uses a bit mask for IPv4 addresses */
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint32_t),
- .field_index = SRC_FIELD_IPV4,
- .input_index = SRC_INPUT_IPV4,
- .offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct rte_ipv4_hdr, src_addr),
- },
- /* next input field (IPv4 destination address) - 4 consecutive bytes. */
- {
- /* rte_flow uses a bit mask for IPv4 addresses */
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint32_t),
- .field_index = DST_FIELD_IPV4,
- .input_index = DST_INPUT_IPV4,
- .offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct rte_ipv4_hdr, dst_addr),
- },
- /*
- * Next 2 fields (src & dst ports) form 4 consecutive bytes.
- * They share the same input index.
- */
- {
- /* rte_flow uses a bit mask for protocol ports */
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint16_t),
- .field_index = SRCP_FIELD_IPV4,
- .input_index = SRCP_DESTP_INPUT_IPV4,
- .offset = sizeof(struct rte_ether_hdr) +
- sizeof(struct rte_ipv4_hdr) +
- offsetof(struct rte_tcp_hdr, src_port),
- },
- {
- /* rte_flow uses a bit mask for protocol ports */
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint16_t),
- .field_index = DSTP_FIELD_IPV4,
- .input_index = SRCP_DESTP_INPUT_IPV4,
- .offset = sizeof(struct rte_ether_hdr) +
- sizeof(struct rte_ipv4_hdr) +
- offsetof(struct rte_tcp_hdr, dst_port),
- },
-};
-
-/* parameters for rte_flow_classify_validate and rte_flow_classify_create */
-
-/* test UDP pattern:
- * "eth / ipv4 src spec 2.2.2.3 src mask 255.255.255.00 dst spec 2.2.2.7
- * dst mask 255.255.255.00 / udp src is 32 dst is 33 / end"
- */
-static struct rte_flow_item_ipv4 ipv4_udp_spec_1 = {
- { { .version_ihl = 0}, 0, 0, 0, 0, 0, IPPROTO_UDP, 0,
- RTE_IPV4(2, 2, 2, 3), RTE_IPV4(2, 2, 2, 7)}
-};
-static const struct rte_flow_item_ipv4 ipv4_mask_24 = {
- .hdr = {
- .next_proto_id = 0xff,
- .src_addr = 0xffffff00,
- .dst_addr = 0xffffff00,
- },
-};
-static struct rte_flow_item_udp udp_spec_1 = {
- { 32, 33, 0, 0 }
-};
-
-static struct rte_flow_item eth_item = { RTE_FLOW_ITEM_TYPE_ETH,
- 0, 0, 0 };
-static struct rte_flow_item eth_item_bad = { -1, 0, 0, 0 };
-
-static struct rte_flow_item ipv4_udp_item_1 = { RTE_FLOW_ITEM_TYPE_IPV4,
- &ipv4_udp_spec_1, 0, &ipv4_mask_24};
-static struct rte_flow_item ipv4_udp_item_bad = { RTE_FLOW_ITEM_TYPE_IPV4,
- NULL, 0, NULL};
-
-static struct rte_flow_item udp_item_1 = { RTE_FLOW_ITEM_TYPE_UDP,
- &udp_spec_1, 0, &rte_flow_item_udp_mask};
-static struct rte_flow_item udp_item_bad = { RTE_FLOW_ITEM_TYPE_UDP,
- NULL, 0, NULL};
-
-static struct rte_flow_item end_item = { RTE_FLOW_ITEM_TYPE_END,
- 0, 0, 0 };
-
-/* test TCP pattern:
- * "eth / ipv4 src spec 1.2.3.4 src mask 255.255.255.00 dst spec 5.6.7.8
- * dst mask 255.255.255.00 / tcp src is 16 dst is 17 / end"
- */
-static struct rte_flow_item_ipv4 ipv4_tcp_spec_1 = {
- { { .version_ihl = 0}, 0, 0, 0, 0, 0, IPPROTO_TCP, 0,
- RTE_IPV4(1, 2, 3, 4), RTE_IPV4(5, 6, 7, 8)}
-};
-
-static struct rte_flow_item_tcp tcp_spec_1 = {
- { 16, 17, 0, 0, 0, 0, 0, 0, 0}
-};
-
-static struct rte_flow_item ipv4_tcp_item_1 = { RTE_FLOW_ITEM_TYPE_IPV4,
- &ipv4_tcp_spec_1, 0, &ipv4_mask_24};
-
-static struct rte_flow_item tcp_item_1 = { RTE_FLOW_ITEM_TYPE_TCP,
- &tcp_spec_1, 0, &rte_flow_item_tcp_mask};
-
-/* test SCTP pattern:
- * "eth / ipv4 src spec 1.2.3.4 src mask 255.255.255.00 dst spec 5.6.7.8
- * dst mask 255.255.255.00 / sctp src is 16 dst is 17/ end"
- */
-static struct rte_flow_item_ipv4 ipv4_sctp_spec_1 = {
- { { .version_ihl = 0}, 0, 0, 0, 0, 0, IPPROTO_SCTP, 0,
- RTE_IPV4(11, 12, 13, 14), RTE_IPV4(15, 16, 17, 18)}
-};
-
-static struct rte_flow_item_sctp sctp_spec_1 = {
- { 10, 11, 0, 0}
-};
-
-static struct rte_flow_item ipv4_sctp_item_1 = { RTE_FLOW_ITEM_TYPE_IPV4,
- &ipv4_sctp_spec_1, 0, &ipv4_mask_24};
-
-static struct rte_flow_item sctp_item_1 = { RTE_FLOW_ITEM_TYPE_SCTP,
- &sctp_spec_1, 0, &rte_flow_item_sctp_mask};
-
-
-/* test actions:
- * "actions count / end"
- */
-static struct rte_flow_query_count count = {
- .reset = 1,
- .hits_set = 1,
- .bytes_set = 1,
- .hits = 0,
- .bytes = 0,
-};
-static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT,
- &count};
-static struct rte_flow_action count_action_bad = { -1, 0};
-
-static struct rte_flow_action end_action = { RTE_FLOW_ACTION_TYPE_END, 0};
-
-static struct rte_flow_action actions[2];
-
-/* test attributes */
-static struct rte_flow_attr attr;
-
-/* test error */
-static struct rte_flow_error error;
-
-/* test pattern */
-static struct rte_flow_item pattern[4];
-
-/* flow classify data for UDP burst */
-static struct rte_flow_classify_ipv4_5tuple_stats udp_ntuple_stats;
-static struct rte_flow_classify_stats udp_classify_stats = {
- .stats = (void *)&udp_ntuple_stats
-};
-
-/* flow classify data for TCP burst */
-static struct rte_flow_classify_ipv4_5tuple_stats tcp_ntuple_stats;
-static struct rte_flow_classify_stats tcp_classify_stats = {
- .stats = (void *)&tcp_ntuple_stats
-};
-
-/* flow classify data for SCTP burst */
-static struct rte_flow_classify_ipv4_5tuple_stats sctp_ntuple_stats;
-static struct rte_flow_classify_stats sctp_classify_stats = {
- .stats = (void *)&sctp_ntuple_stats
-};
-
-struct flow_classifier_acl *cls;
-
-struct flow_classifier_acl {
- struct rte_flow_classifier *cls;
-} __rte_cache_aligned;
-
-/*
- * test functions by passing invalid or
- * non-workable parameters.
- */
-static int
-test_invalid_parameters(void)
-{
- struct rte_flow_classify_rule *rule;
- int ret;
-
- ret = rte_flow_classify_validate(NULL, NULL, NULL, NULL, NULL);
- if (!ret) {
- printf("Line %i: rte_flow_classify_validate",
- __LINE__);
- printf(" with NULL param should have failed!\n");
- return -1;
- }
-
- rule = rte_flow_classify_table_entry_add(NULL, NULL, NULL, NULL,
- NULL, NULL);
- if (rule) {
- printf("Line %i: flow_classifier_table_entry_add", __LINE__);
- printf(" with NULL param should have failed!\n");
- return -1;
- }
-
- ret = rte_flow_classify_table_entry_delete(NULL, NULL);
- if (!ret) {
- printf("Line %i: rte_flow_classify_table_entry_delete",
- __LINE__);
- printf(" with NULL param should have failed!\n");
- return -1;
- }
-
- ret = rte_flow_classifier_query(NULL, NULL, 0, NULL, NULL);
- if (!ret) {
- printf("Line %i: flow_classifier_query", __LINE__);
- printf(" with NULL param should have failed!\n");
- return -1;
- }
-
- rule = rte_flow_classify_table_entry_add(NULL, NULL, NULL, NULL,
- NULL, &error);
- if (rule) {
- printf("Line %i: flow_classify_table_entry_add ", __LINE__);
- printf("with NULL param should have failed!\n");
- return -1;
- }
-
- ret = rte_flow_classify_table_entry_delete(NULL, NULL);
- if (!ret) {
- printf("Line %i: rte_flow_classify_table_entry_delete",
- __LINE__);
- printf("with NULL param should have failed!\n");
- return -1;
- }
-
- ret = rte_flow_classifier_query(NULL, NULL, 0, NULL, NULL);
- if (!ret) {
- printf("Line %i: flow_classifier_query", __LINE__);
- printf(" with NULL param should have failed!\n");
- return -1;
- }
- return 0;
-}
-
-static int
-test_valid_parameters(void)
-{
- struct rte_flow_classify_rule *rule;
- int ret;
- int key_found;
-
- /*
- * set up parameters for rte_flow_classify_validate,
- * rte_flow_classify_table_entry_add and
- * rte_flow_classify_table_entry_delete
- */
-
- attr.ingress = 1;
- attr.priority = 1;
- pattern[0] = eth_item;
- pattern[1] = ipv4_udp_item_1;
- pattern[2] = udp_item_1;
- pattern[3] = end_item;
- actions[0] = count_action;
- actions[1] = end_action;
-
- ret = rte_flow_classify_validate(cls->cls, &attr, pattern,
- actions, &error);
- if (ret) {
- printf("Line %i: rte_flow_classify_validate",
- __LINE__);
- printf(" should not have failed!\n");
- return -1;
- }
- rule = rte_flow_classify_table_entry_add(cls->cls, &attr, pattern,
- actions, &key_found, &error);
-
- if (!rule) {
- printf("Line %i: flow_classify_table_entry_add", __LINE__);
- printf(" should not have failed!\n");
- return -1;
- }
-
- ret = rte_flow_classify_table_entry_delete(cls->cls, rule);
- if (ret) {
- printf("Line %i: rte_flow_classify_table_entry_delete",
- __LINE__);
- printf(" should not have failed!\n");
- return -1;
- }
- return 0;
-}
-
-static int
-test_invalid_patterns(void)
-{
- struct rte_flow_classify_rule *rule;
- int ret;
- int key_found;
-
- /*
- * set up parameters for rte_flow_classify_validate,
- * rte_flow_classify_table_entry_add and
- * rte_flow_classify_table_entry_delete
- */
-
- attr.ingress = 1;
- attr.priority = 1;
- pattern[0] = eth_item_bad;
- pattern[1] = ipv4_udp_item_1;
- pattern[2] = udp_item_1;
- pattern[3] = end_item;
- actions[0] = count_action;
- actions[1] = end_action;
-
- pattern[0] = eth_item;
- pattern[1] = ipv4_udp_item_bad;
-
- ret = rte_flow_classify_validate(cls->cls, &attr, pattern,
- actions, &error);
- if (!ret) {
- printf("Line %i: rte_flow_classify_validate", __LINE__);
- printf(" should have failed!\n");
- return -1;
- }
-
- rule = rte_flow_classify_table_entry_add(cls->cls, &attr, pattern,
- actions, &key_found, &error);
- if (rule) {
- printf("Line %i: flow_classify_table_entry_add", __LINE__);
- printf(" should have failed!\n");
- return -1;
- }
-
- ret = rte_flow_classify_table_entry_delete(cls->cls, rule);
- if (!ret) {
- printf("Line %i: rte_flow_classify_table_entry_delete",
- __LINE__);
- printf(" should have failed!\n");
- return -1;
- }
-
- pattern[1] = ipv4_udp_item_1;
- pattern[2] = udp_item_bad;
- pattern[3] = end_item;
-
- ret = rte_flow_classify_validate(cls->cls, &attr, pattern,
- actions, &error);
- if (!ret) {
- printf("Line %i: rte_flow_classify_validate", __LINE__);
- printf(" should have failed!\n");
- return -1;
- }
-
- rule = rte_flow_classify_table_entry_add(cls->cls, &attr, pattern,
- actions, &key_found, &error);
- if (rule) {
- printf("Line %i: flow_classify_table_entry_add", __LINE__);
- printf(" should have failed!\n");
- return -1;
- }
-
- ret = rte_flow_classify_table_entry_delete(cls->cls, rule);
- if (!ret) {
- printf("Line %i: rte_flow_classify_table_entry_delete",
- __LINE__);
- printf(" should have failed!\n");
- return -1;
- }
- return 0;
-}
-
-static int
-test_invalid_actions(void)
-{
- struct rte_flow_classify_rule *rule;
- int ret;
- int key_found;
-
- /*
- * set up parameters for rte_flow_classify_validate,
- * rte_flow_classify_table_entry_add and
- * rte_flow_classify_table_entry_delete
- */
-
- attr.ingress = 1;
- attr.priority = 1;
- pattern[0] = eth_item;
- pattern[1] = ipv4_udp_item_1;
- pattern[2] = udp_item_1;
- pattern[3] = end_item;
- actions[0] = count_action_bad;
- actions[1] = end_action;
-
- ret = rte_flow_classify_validate(cls->cls, &attr, pattern,
- actions, &error);
- if (!ret) {
- printf("Line %i: rte_flow_classify_validate", __LINE__);
- printf(" should have failed!\n");
- return -1;
- }
-
- rule = rte_flow_classify_table_entry_add(cls->cls, &attr, pattern,
- actions, &key_found, &error);
- if (rule) {
- printf("Line %i: flow_classify_table_entry_add", __LINE__);
- printf(" should have failed!\n");
- return -1;
- }
-
- ret = rte_flow_classify_table_entry_delete(cls->cls, rule);
- if (!ret) {
- printf("Line %i: rte_flow_classify_table_entry_delete",
- __LINE__);
- printf(" should have failed!\n");
- return -1;
- }
-
- return 0;
-}
-
-static int
-init_ipv4_udp_traffic(struct rte_mempool *mp,
- struct rte_mbuf **pkts_burst, uint32_t burst_size)
-{
- struct rte_ether_hdr pkt_eth_hdr;
- struct rte_ipv4_hdr pkt_ipv4_hdr;
- struct rte_udp_hdr pkt_udp_hdr;
- uint32_t src_addr = IPV4_ADDR(2, 2, 2, 3);
- uint32_t dst_addr = IPV4_ADDR(2, 2, 2, 7);
- uint16_t src_port = 32;
- uint16_t dst_port = 33;
- uint16_t pktlen;
-
- static uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
- static uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
-
- printf("Set up IPv4 UDP traffic\n");
- initialize_eth_header(&pkt_eth_hdr,
- (struct rte_ether_addr *)src_mac,
- (struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPV4, 0, 0);
- pktlen = (uint16_t)(sizeof(struct rte_ether_hdr));
- printf("ETH pktlen %u\n", pktlen);
-
- pktlen = initialize_ipv4_header(&pkt_ipv4_hdr, src_addr, dst_addr,
- pktlen);
- printf("ETH + IPv4 pktlen %u\n", pktlen);
-
- pktlen = initialize_udp_header(&pkt_udp_hdr, src_port, dst_port,
- pktlen);
- printf("ETH + IPv4 + UDP pktlen %u\n\n", pktlen);
-
- return generate_packet_burst(mp, pkts_burst, &pkt_eth_hdr,
- 0, &pkt_ipv4_hdr, 1,
- &pkt_udp_hdr, burst_size,
- PACKET_BURST_GEN_PKT_LEN, 1);
-}
-
-static int
-init_ipv4_tcp_traffic(struct rte_mempool *mp,
- struct rte_mbuf **pkts_burst, uint32_t burst_size)
-{
- struct rte_ether_hdr pkt_eth_hdr;
- struct rte_ipv4_hdr pkt_ipv4_hdr;
- struct rte_tcp_hdr pkt_tcp_hdr;
- uint32_t src_addr = IPV4_ADDR(1, 2, 3, 4);
- uint32_t dst_addr = IPV4_ADDR(5, 6, 7, 8);
- uint16_t src_port = 16;
- uint16_t dst_port = 17;
- uint16_t pktlen;
-
- static uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
- static uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
-
- printf("Set up IPv4 TCP traffic\n");
- initialize_eth_header(&pkt_eth_hdr,
- (struct rte_ether_addr *)src_mac,
- (struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPV4, 0, 0);
- pktlen = (uint16_t)(sizeof(struct rte_ether_hdr));
- printf("ETH pktlen %u\n", pktlen);
-
- pktlen = initialize_ipv4_header_proto(&pkt_ipv4_hdr, src_addr,
- dst_addr, pktlen, IPPROTO_TCP);
- printf("ETH + IPv4 pktlen %u\n", pktlen);
-
- pktlen = initialize_tcp_header(&pkt_tcp_hdr, src_port, dst_port,
- pktlen);
- printf("ETH + IPv4 + TCP pktlen %u\n\n", pktlen);
-
- return generate_packet_burst_proto(mp, pkts_burst, &pkt_eth_hdr,
- 0, &pkt_ipv4_hdr, 1, IPPROTO_TCP,
- &pkt_tcp_hdr, burst_size,
- PACKET_BURST_GEN_PKT_LEN, 1);
-}
-
-static int
-init_ipv4_sctp_traffic(struct rte_mempool *mp,
- struct rte_mbuf **pkts_burst, uint32_t burst_size)
-{
- struct rte_ether_hdr pkt_eth_hdr;
- struct rte_ipv4_hdr pkt_ipv4_hdr;
- struct rte_sctp_hdr pkt_sctp_hdr;
- uint32_t src_addr = IPV4_ADDR(11, 12, 13, 14);
- uint32_t dst_addr = IPV4_ADDR(15, 16, 17, 18);
- uint16_t src_port = 10;
- uint16_t dst_port = 11;
- uint16_t pktlen;
-
- static uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
- static uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA };
-
- printf("Set up IPv4 SCTP traffic\n");
- initialize_eth_header(&pkt_eth_hdr,
- (struct rte_ether_addr *)src_mac,
- (struct rte_ether_addr *)dst_mac, RTE_ETHER_TYPE_IPV4, 0, 0);
- pktlen = (uint16_t)(sizeof(struct rte_ether_hdr));
- printf("ETH pktlen %u\n", pktlen);
-
- pktlen = initialize_ipv4_header_proto(&pkt_ipv4_hdr, src_addr,
- dst_addr, pktlen, IPPROTO_SCTP);
- printf("ETH + IPv4 pktlen %u\n", pktlen);
-
- pktlen = initialize_sctp_header(&pkt_sctp_hdr, src_port, dst_port,
- pktlen);
- printf("ETH + IPv4 + SCTP pktlen %u\n\n", pktlen);
-
- return generate_packet_burst_proto(mp, pkts_burst, &pkt_eth_hdr,
- 0, &pkt_ipv4_hdr, 1, IPPROTO_SCTP,
- &pkt_sctp_hdr, burst_size,
- PACKET_BURST_GEN_PKT_LEN, 1);
-}
-
-static int
-init_mbufpool(void)
-{
- int socketid;
- int ret = 0;
- unsigned int lcore_id;
- char s[64];
-
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- if (rte_lcore_is_enabled(lcore_id) == 0)
- continue;
-
- socketid = rte_lcore_to_socket_id(lcore_id);
- if (socketid >= NB_SOCKETS) {
- printf(
- "Socket %d of lcore %u is out of range %d\n",
- socketid, lcore_id, NB_SOCKETS);
- ret = -1;
- break;
- }
- if (mbufpool[socketid] == NULL) {
- snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
- mbufpool[socketid] =
- rte_pktmbuf_pool_create(s, NB_MBUF,
- MEMPOOL_CACHE_SIZE, 0, MBUF_SIZE,
- socketid);
- if (mbufpool[socketid]) {
- printf("Allocated mbuf pool on socket %d\n",
- socketid);
- } else {
- printf("Cannot init mbuf pool on socket %d\n",
- socketid);
- ret = -ENOMEM;
- break;
- }
- }
- }
- return ret;
-}
-
-static int
-test_query_udp(void)
-{
- struct rte_flow_error error;
- struct rte_flow_classify_rule *rule;
- int ret;
- int i;
- int key_found;
-
- ret = init_ipv4_udp_traffic(mbufpool[0], bufs, MAX_PKT_BURST);
- if (ret != MAX_PKT_BURST) {
- printf("Line %i: init_udp_ipv4_traffic has failed!\n",
- __LINE__);
- return -1;
- }
-
- for (i = 0; i < MAX_PKT_BURST; i++)
- bufs[i]->packet_type = RTE_PTYPE_L3_IPV4;
-
- /*
- * set up parameters for rte_flow_classify_validate,
- * rte_flow_classify_table_entry_add and
- * rte_flow_classify_table_entry_delete
- */
-
- attr.ingress = 1;
- attr.priority = 1;
- pattern[0] = eth_item;
- pattern[1] = ipv4_udp_item_1;
- pattern[2] = udp_item_1;
- pattern[3] = end_item;
- actions[0] = count_action;
- actions[1] = end_action;
-
- ret = rte_flow_classify_validate(cls->cls, &attr, pattern,
- actions, &error);
- if (ret) {
- printf("Line %i: rte_flow_classify_validate", __LINE__);
- printf(" should not have failed!\n");
- return -1;
- }
-
- rule = rte_flow_classify_table_entry_add(cls->cls, &attr, pattern,
- actions, &key_found, &error);
- if (!rule) {
- printf("Line %i: flow_classify_table_entry_add", __LINE__);
- printf(" should not have failed!\n");
- return -1;
- }
-
- ret = rte_flow_classifier_query(cls->cls, bufs, MAX_PKT_BURST,
- rule, &udp_classify_stats);
- if (ret) {
- printf("Line %i: flow_classifier_query", __LINE__);
- printf(" should not have failed!\n");
- return -1;
- }
-
- ret = rte_flow_classify_table_entry_delete(cls->cls, rule);
- if (ret) {
- printf("Line %i: rte_flow_classify_table_entry_delete",
- __LINE__);
- printf(" should not have failed!\n");
- return -1;
- }
- return 0;
-}
-
-static int
-test_query_tcp(void)
-{
- struct rte_flow_classify_rule *rule;
- int ret;
- int i;
- int key_found;
-
- ret = init_ipv4_tcp_traffic(mbufpool[0], bufs, MAX_PKT_BURST);
- if (ret != MAX_PKT_BURST) {
- printf("Line %i: init_ipv4_tcp_traffic has failed!\n",
- __LINE__);
- return -1;
- }
-
- for (i = 0; i < MAX_PKT_BURST; i++)
- bufs[i]->packet_type = RTE_PTYPE_L3_IPV4;
-
- /*
- * set up parameters for rte_flow_classify_validate,
- * rte_flow_classify_table_entry_add and
- * rte_flow_classify_table_entry_delete
- */
-
- attr.ingress = 1;
- attr.priority = 1;
- pattern[0] = eth_item;
- pattern[1] = ipv4_tcp_item_1;
- pattern[2] = tcp_item_1;
- pattern[3] = end_item;
- actions[0] = count_action;
- actions[1] = end_action;
-
- ret = rte_flow_classify_validate(cls->cls, &attr, pattern,
- actions, &error);
- if (ret) {
- printf("Line %i: flow_classifier_query", __LINE__);
- printf(" should not have failed!\n");
- return -1;
- }
-
- rule = rte_flow_classify_table_entry_add(cls->cls, &attr, pattern,
- actions, &key_found, &error);
- if (!rule) {
- printf("Line %i: flow_classify_table_entry_add", __LINE__);
- printf(" should not have failed!\n");
- return -1;
- }
-
- ret = rte_flow_classifier_query(cls->cls, bufs, MAX_PKT_BURST,
- rule, &tcp_classify_stats);
- if (ret) {
- printf("Line %i: flow_classifier_query", __LINE__);
- printf(" should not have failed!\n");
- return -1;
- }
-
- ret = rte_flow_classify_table_entry_delete(cls->cls, rule);
- if (ret) {
- printf("Line %i: rte_flow_classify_table_entry_delete",
- __LINE__);
- printf(" should not have failed!\n");
- return -1;
- }
- return 0;
-}
-
-static int
-test_query_sctp(void)
-{
- struct rte_flow_classify_rule *rule;
- int ret;
- int i;
- int key_found;
-
- ret = init_ipv4_sctp_traffic(mbufpool[0], bufs, MAX_PKT_BURST);
- if (ret != MAX_PKT_BURST) {
- printf("Line %i: init_ipv4_tcp_traffic has failed!\n",
- __LINE__);
- return -1;
- }
-
- for (i = 0; i < MAX_PKT_BURST; i++)
- bufs[i]->packet_type = RTE_PTYPE_L3_IPV4;
-
- /*
- * set up parameters rte_flow_classify_validate,
- * rte_flow_classify_table_entry_add and
- * rte_flow_classify_table_entry_delete
- */
-
- attr.ingress = 1;
- attr.priority = 1;
- pattern[0] = eth_item;
- pattern[1] = ipv4_sctp_item_1;
- pattern[2] = sctp_item_1;
- pattern[3] = end_item;
- actions[0] = count_action;
- actions[1] = end_action;
-
- ret = rte_flow_classify_validate(cls->cls, &attr, pattern,
- actions, &error);
- if (ret) {
- printf("Line %i: flow_classifier_query", __LINE__);
- printf(" should not have failed!\n");
- return -1;
- }
-
- rule = rte_flow_classify_table_entry_add(cls->cls, &attr, pattern,
- actions, &key_found, &error);
- if (!rule) {
- printf("Line %i: flow_classify_table_entry_add", __LINE__);
- printf(" should not have failed!\n");
- return -1;
- }
-
- ret = rte_flow_classifier_query(cls->cls, bufs, MAX_PKT_BURST,
- rule, &sctp_classify_stats);
- if (ret) {
- printf("Line %i: flow_classifier_query", __LINE__);
- printf(" should not have failed!\n");
- return -1;
- }
-
- ret = rte_flow_classify_table_entry_delete(cls->cls, rule);
- if (ret) {
- printf("Line %i: rte_flow_classify_table_entry_delete",
- __LINE__);
- printf(" should not have failed!\n");
- return -1;
- }
- return 0;
-}
-
-static int
-test_flow_classify(void)
-{
- struct rte_table_acl_params table_acl_params;
- struct rte_flow_classify_table_params cls_table_params;
- struct rte_flow_classifier_params cls_params;
- int ret;
- uint32_t size;
-
- /* Memory allocation */
- size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
- cls = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
-
- cls_params.name = "flow_classifier";
- cls_params.socket_id = 0;
- cls->cls = rte_flow_classifier_create(&cls_params);
- if (cls->cls == NULL) {
- printf("Line %i: flow classifier create has failed!\n",
- __LINE__);
- rte_free(cls);
- return TEST_FAILED;
- }
-
- /* initialise ACL table params */
- table_acl_params.n_rule_fields = RTE_DIM(ipv4_defs);
- table_acl_params.name = "table_acl_ipv4_5tuple";
- table_acl_params.n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;
- memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));
-
- /* initialise table create params */
- cls_table_params.ops = &rte_table_acl_ops;
- cls_table_params.arg_create = &table_acl_params;
- cls_table_params.type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;
-
- ret = rte_flow_classify_table_create(cls->cls, &cls_table_params);
- if (ret) {
- printf("Line %i: f_create has failed!\n", __LINE__);
- rte_flow_classifier_free(cls->cls);
- rte_free(cls);
- return TEST_FAILED;
- }
- printf("Created table_acl for for IPv4 five tuple packets\n");
-
- ret = init_mbufpool();
- if (ret) {
- printf("Line %i: init_mbufpool has failed!\n", __LINE__);
- return TEST_FAILED;
- }
-
- if (test_invalid_parameters() < 0)
- return TEST_FAILED;
- if (test_valid_parameters() < 0)
- return TEST_FAILED;
- if (test_invalid_patterns() < 0)
- return TEST_FAILED;
- if (test_invalid_actions() < 0)
- return TEST_FAILED;
- if (test_query_udp() < 0)
- return TEST_FAILED;
- if (test_query_tcp() < 0)
- return TEST_FAILED;
- if (test_query_sctp() < 0)
- return TEST_FAILED;
-
- return TEST_SUCCESS;
-}
-
-#endif /* !RTE_EXEC_ENV_WINDOWS */
-
-REGISTER_TEST_COMMAND(flow_classify_autotest, test_flow_classify);
diff --git a/app/test/test_flow_classify.h b/app/test/test_flow_classify.h
deleted file mode 100644
index 6bd10ec972e5..000000000000
--- a/app/test/test_flow_classify.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
- */
-
-#ifndef TEST_FLOW_CLASSIFY_H_
-#define TEST_FLOW_CLASSIFY_H_
-
-/* ACL field definitions for IPv4 5 tuple rule */
-
-enum {
- PROTO_FIELD_IPV4,
- SRC_FIELD_IPV4,
- DST_FIELD_IPV4,
- SRCP_FIELD_IPV4,
- DSTP_FIELD_IPV4,
- NUM_FIELDS_IPV4
-};
-
-enum {
- PROTO_INPUT_IPV4,
- SRC_INPUT_IPV4,
- DST_INPUT_IPV4,
- SRCP_DESTP_INPUT_IPV4
-};
-
-#endif /* TEST_FLOW_CLASSIFY_H_ */
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 3bc8778981f6..5cd8c9de8105 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -160,7 +160,6 @@ The public API headers are grouped by topics:
[EFD](@ref rte_efd.h),
[ACL](@ref rte_acl.h),
[member](@ref rte_member.h),
- [flow classify](@ref rte_flow_classify.h),
[BPF](@ref rte_bpf.h)
- **containers**:
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 1a4210b948a8..9a9c52e5569c 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -40,7 +40,6 @@ INPUT = @TOPDIR@/doc/api/doxy-api-index.md \
@TOPDIR@/lib/ethdev \
@TOPDIR@/lib/eventdev \
@TOPDIR@/lib/fib \
- @TOPDIR@/lib/flow_classify \
@TOPDIR@/lib/gpudev \
@TOPDIR@/lib/graph \
@TOPDIR@/lib/gro \
diff --git a/doc/guides/freebsd_gsg/build_sample_apps.rst b/doc/guides/freebsd_gsg/build_sample_apps.rst
index b1ab7545b1ce..7bdd88e56d32 100644
--- a/doc/guides/freebsd_gsg/build_sample_apps.rst
+++ b/doc/guides/freebsd_gsg/build_sample_apps.rst
@@ -31,7 +31,7 @@ the installation of DPDK using `meson install` as described previously::
$ gmake
cc -O3 -I/usr/local/include -include rte_config.h -march=native
-D__BSD_VISIBLE main.c -o build/helloworld-shared
- -L/usr/local/lib -lrte_telemetry -lrte_bpf -lrte_flow_classify
+ -L/usr/local/lib -lrte_telemetry -lrte_bpf
-lrte_pipeline -lrte_table -lrte_port -lrte_fib -lrte_ipsec
-lrte_stack -lrte_security -lrte_sched -lrte_reorder -lrte_rib
-lrte_rcu -lrte_rawdev -lrte_pdump -lrte_member -lrte_lpm
diff --git a/doc/guides/freebsd_gsg/install_from_ports.rst b/doc/guides/freebsd_gsg/install_from_ports.rst
index d946f3f3b2eb..3c98c46b298f 100644
--- a/doc/guides/freebsd_gsg/install_from_ports.rst
+++ b/doc/guides/freebsd_gsg/install_from_ports.rst
@@ -84,7 +84,7 @@ via the contigmem module, and 4 NIC ports bound to the nic_uio module::
cd helloworld/
gmake
- cc -O3 -I/usr/local/include -include rte_config.h -march=corei7 -D__BSD_VISIBLE main.c -o build/helloworld-shared -L/usr/local/lib -lrte_bpf -lrte_flow_classify -lrte_pipeline -lrte_table -lrte_port -lrte_fib -lrte_ipsec -lrte_stack -lrte_security -lrte_sched -lrte_reorder -lrte_rib -lrte_rcu -lrte_rawdev -lrte_pdump -lrte_member -lrte_lpm -lrte_latencystats -lrte_jobstats -lrte_ip_frag -lrte_gso -lrte_gro -lrte_eventdev -lrte_efd -lrte_distributor -lrte_cryptodev -lrte_compressdev -lrte_cfgfile -lrte_bitratestats -lrte_bbdev -lrte_acl -lrte_timer -lrte_hash -lrte_metrics -lrte_cmdline -lrte_pci -lrte_ethdev -lrte_meter -lrte_net -lrte_mbuf -lrte_mempool -lrte_ring -lrte_eal -lrte_kvargs
+ cc -O3 -I/usr/local/include -include rte_config.h -march=corei7 -D__BSD_VISIBLE main.c -o build/helloworld-shared -L/usr/local/lib -lrte_bpf -lrte_pipeline -lrte_table -lrte_port -lrte_fib -lrte_ipsec -lrte_stack -lrte_security -lrte_sched -lrte_reorder -lrte_rib -lrte_rcu -lrte_rawdev -lrte_pdump -lrte_member -lrte_lpm -lrte_latencystats -lrte_jobstats -lrte_ip_frag -lrte_gso -lrte_gro -lrte_eventdev -lrte_efd -lrte_distributor -lrte_cryptodev -lrte_compressdev -lrte_cfgfile -lrte_bitratestats -lrte_bbdev -lrte_acl -lrte_timer -lrte_hash -lrte_metrics -lrte_cmdline -lrte_pci -lrte_ethdev -lrte_meter -lrte_net -lrte_mbuf -lrte_mempool -lrte_ring -lrte_eal -lrte_kvargs
ln -sf helloworld-shared build/helloworld
sudo ./build/helloworld -l 0-3
diff --git a/doc/guides/prog_guide/flow_classify_lib.rst b/doc/guides/prog_guide/flow_classify_lib.rst
deleted file mode 100644
index ad2e10ec2681..000000000000
--- a/doc/guides/prog_guide/flow_classify_lib.rst
+++ /dev/null
@@ -1,424 +0,0 @@
-.. SPDX-License-Identifier: BSD-3-Clause
- Copyright(c) 2017 Intel Corporation.
-
-Flow Classification Library
-===========================
-
-.. note::
-
- The Flow Classification library is deprecated and will be removed in future.
- See :doc:`../rel_notes/deprecation`.
-
- It is disabled by default in the DPDK build.
- To re-enable the library, remove 'flow_classify' from the "disable_libs"
- meson option when configuring a build.
-
-DPDK provides a Flow Classification library that provides the ability
-to classify an input packet by matching it against a set of Flow rules.
-
-The initial implementation supports counting of IPv4 5-tuple packets which match
-a particular Flow rule only.
-
-Please refer to the
-:doc:`./rte_flow`
-for more information.
-
-The Flow Classification library uses the ``librte_table`` API for managing Flow
-rules and matching packets against the Flow rules.
-The library is table agnostic and can use the following tables:
-``Access Control List``, ``Hash`` and ``Longest Prefix Match(LPM)``.
-The ``Access Control List`` table is used in the initial implementation.
-
-Please refer to the
-:doc:`./packet_framework`
-for more information.on ``librte_table``.
-
-DPDK provides an Access Control List library that provides the ability to
-classify an input packet based on a set of classification rules.
-
-Please refer to the
-:doc:`./packet_classif_access_ctrl`
-library for more information on ``librte_acl``.
-
-There is also a Flow Classify sample application which demonstrates the use of
-the Flow Classification Library API's.
-
-Please refer to the
-:doc:`../sample_app_ug/flow_classify`
-for more information on the ``flow_classify`` sample application.
-
-Overview
---------
-
-The library has the following API's
-
-.. code-block:: c
-
- /**
- * Flow classifier create
- *
- * @param params
- * Parameters for flow classifier creation
- * @return
- * Handle to flow classifier instance on success or NULL otherwise
- */
- struct rte_flow_classifier *
- rte_flow_classifier_create(struct rte_flow_classifier_params *params);
-
- /**
- * Flow classifier free
- *
- * @param cls
- * Handle to flow classifier instance
- * @return
- * 0 on success, error code otherwise
- */
- int
- rte_flow_classifier_free(struct rte_flow_classifier *cls);
-
- /**
- * Flow classify table create
- *
- * @param cls
- * Handle to flow classifier instance
- * @param params
- * Parameters for flow_classify table creation
- * @return
- * 0 on success, error code otherwise
- */
- int
- rte_flow_classify_table_create(struct rte_flow_classifier *cls,
- struct rte_flow_classify_table_params *params);
-
- /**
- * Validate the flow classify rule
- *
- * @param[in] cls
- * Handle to flow classifier instance
- * @param[in] attr
- * Flow rule attributes
- * @param[in] pattern
- * Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- * Associated actions (list terminated by the END pattern item).
- * @param[out] error
- * Perform verbose error reporting if not NULL. Structure
- * initialised in case of error only.
- * @return
- * 0 on success, error code otherwise
- */
- int
- rte_flow_classify_validate(struct rte_flow_classifier *cls,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error);
-
- /**
- * Add a flow classify rule to the flow_classifier table.
- *
- * @param[in] cls
- * Flow classifier handle
- * @param[in] attr
- * Flow rule attributes
- * @param[in] pattern
- * Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- * Associated actions (list terminated by the END pattern item).
- * @param[out] key_found
- * returns 1 if rule present already, 0 otherwise.
- * @param[out] error
- * Perform verbose error reporting if not NULL. Structure
- * initialised in case of error only.
- * @return
- * A valid handle in case of success, NULL otherwise.
- */
- struct rte_flow_classify_rule *
- rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- int *key_found;
- struct rte_flow_error *error);
-
- /**
- * Delete a flow classify rule from the flow_classifier table.
- *
- * @param[in] cls
- * Flow classifier handle
- * @param[in] rule
- * Flow classify rule
- * @return
- * 0 on success, error code otherwise.
- */
- int
- rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
- struct rte_flow_classify_rule *rule);
-
- /**
- * Query flow classifier for given rule.
- *
- * @param[in] cls
- * Flow classifier handle
- * @param[in] pkts
- * Pointer to packets to process
- * @param[in] nb_pkts
- * Number of packets to process
- * @param[in] rule
- * Flow classify rule
- * @param[in] stats
- * Flow classify stats
- *
- * @return
- * 0 on success, error code otherwise.
- */
- int
- rte_flow_classifier_query(struct rte_flow_classifier *cls,
- struct rte_mbuf **pkts,
- const uint16_t nb_pkts,
- struct rte_flow_classify_rule *rule,
- struct rte_flow_classify_stats *stats);
-
-Classifier creation
-~~~~~~~~~~~~~~~~~~~
-
-The application creates the ``Classifier`` using the
-``rte_flow_classifier_create`` API.
-The ``rte_flow_classify_params`` structure must be initialised by the
-application before calling the API.
-
-.. code-block:: c
-
- struct rte_flow_classifier_params {
- /** flow classifier name */
- const char *name;
-
- /** CPU socket ID where memory for the flow classifier and its */
- /** elements (tables) should be allocated */
- int socket_id;
- };
-
-The ``Classifier`` has the following internal structures:
-
-.. code-block:: c
-
- struct rte_cls_table {
- /* Input parameters */
- struct rte_table_ops ops;
- uint32_t entry_size;
- enum rte_flow_classify_table_type type;
-
- /* Handle to the low-level table object */
- void *h_table;
- };
-
- #define RTE_FLOW_CLASSIFIER_MAX_NAME_SZ 256
-
- struct rte_flow_classifier {
- /* Input parameters */
- char name[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ];
- int socket_id;
-
- /* Internal */
- /* ntuple_filter */
- struct rte_eth_ntuple_filter ntuple_filter;
-
- /* classifier tables */
- struct rte_cls_table tables[RTE_FLOW_CLASSIFY_TABLE_MAX];
- uint32_t table_mask;
- uint32_t num_tables;
-
- uint16_t nb_pkts;
- struct rte_flow_classify_table_entry
- *entries[RTE_PORT_IN_BURST_SIZE_MAX];
- } __rte_cache_aligned;
-
-Adding a table to the Classifier
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The application adds a table to the ``Classifier`` using the
-``rte_flow_classify_table_create`` API.
-The ``rte_flow_classify_table_params`` structure must be initialised by the
-application before calling the API.
-
-.. code-block:: c
-
- struct rte_flow_classify_table_params {
- /** Table operations (specific to each table type) */
- struct rte_table_ops *ops;
-
- /** Opaque param to be passed to the table create operation */
- void *arg_create;
-
- /** Classifier table type */
- enum rte_flow_classify_table_type type;
- };
-
-To create an ACL table the ``rte_table_acl_params`` structure must be
-initialised and assigned to ``arg_create`` in the
-``rte_flow_classify_table_params`` structure.
-
-.. code-block:: c
-
- struct rte_table_acl_params {
- /** Name */
- const char *name;
-
- /** Maximum number of ACL rules in the table */
- uint32_t n_rules;
-
- /** Number of fields in the ACL rule specification */
- uint32_t n_rule_fields;
-
- /** Format specification of the fields of the ACL rule */
- struct rte_acl_field_def field_format[RTE_ACL_MAX_FIELDS];
- };
-
-The fields for the ACL rule must also be initialised by the application.
-
-An ACL table can be added to the ``Classifier`` for each ACL rule, for example
-another table could be added for the IPv6 5-tuple rule.
-
-Flow Parsing
-~~~~~~~~~~~~
-
-The library currently supports three IPv4 5-tuple flow patterns, for UDP, TCP
-and SCTP.
-
-.. code-block:: c
-
- /* Pattern for IPv4 5-tuple UDP filter */
- static enum rte_flow_item_type pattern_ntuple_1[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_END,
- };
-
- /* Pattern for IPv4 5-tuple TCP filter */
- static enum rte_flow_item_type pattern_ntuple_2[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_TCP,
- RTE_FLOW_ITEM_TYPE_END,
- };
-
- /* Pattern for IPv4 5-tuple SCTP filter */
- static enum rte_flow_item_type pattern_ntuple_3[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_SCTP,
- RTE_FLOW_ITEM_TYPE_END,
- };
-
-The API function ``rte_flow_classify_validate`` parses the
-IPv4 5-tuple pattern, attributes and actions and returns the 5-tuple data in the
-``rte_eth_ntuple_filter`` structure.
-
-.. code-block:: c
-
- static int
- rte_flow_classify_validate(struct rte_flow_classifier *cls,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-
-Adding Flow Rules
-~~~~~~~~~~~~~~~~~
-
-The ``rte_flow_classify_table_entry_add`` API creates an
-``rte_flow_classify`` object which contains the flow_classify id and type, the
-action, a union of add and delete keys and a union of rules.
-It uses the ``rte_flow_classify_validate`` API function for parsing the
-flow parameters.
-The 5-tuple ACL key data is obtained from the ``rte_eth_ntuple_filter``
-structure populated by the ``classify_parse_ntuple_filter`` function which
-parses the Flow rule.
-
-.. code-block:: c
-
- struct acl_keys {
- struct rte_table_acl_rule_add_params key_add; /* add key */
- struct rte_table_acl_rule_delete_params key_del; /* delete key */
- };
-
- struct classify_rules {
- enum rte_flow_classify_rule_type type;
- union {
- struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
- } u;
- };
-
- struct rte_flow_classify {
- uint32_t id; /* unique ID of classify object */
- enum rte_flow_classify_table_type tbl_type; /* rule table */
- struct classify_rules rules; /* union of rules */
- union {
- struct acl_keys key;
- } u;
- int key_found; /* rule key found in table */
- struct rte_flow_classify_table_entry entry; /* rule meta data */
- void *entry_ptr; /* handle to the table entry for rule meta data */
- };
-
-It then calls the ``table.ops.f_add`` API to add the rule to the ACL
-table.
-
-Deleting Flow Rules
-~~~~~~~~~~~~~~~~~~~
-
-The ``rte_flow_classify_table_entry_delete`` API calls the
-``table.ops.f_delete`` API to delete a rule from the ACL table.
-
-Packet Matching
-~~~~~~~~~~~~~~~
-
-The ``rte_flow_classifier_query`` API is used to find packets which match a
-given flow rule in the table.
-This API calls the flow_classify_run internal function which calls the
-``table.ops.f_lookup`` API to see if any packets in a burst match any
-of the Flow rules in the table.
-The meta data for the highest priority rule matched for each packet is returned
-in the entries array in the ``rte_flow_classify`` object.
-The internal function ``action_apply`` implements the ``Count`` action which is
-used to return data which matches a particular Flow rule.
-
-The rte_flow_classifier_query API uses the following structures to return data
-to the application.
-
-.. code-block:: c
-
- /** IPv4 5-tuple data */
- struct rte_flow_classify_ipv4_5tuple {
- uint32_t dst_ip; /**< Destination IP address in big endian. */
- uint32_t dst_ip_mask; /**< Mask of destination IP address. */
- uint32_t src_ip; /**< Source IP address in big endian. */
- uint32_t src_ip_mask; /**< Mask of destination IP address. */
- uint16_t dst_port; /**< Destination port in big endian. */
- uint16_t dst_port_mask; /**< Mask of destination port. */
- uint16_t src_port; /**< Source Port in big endian. */
- uint16_t src_port_mask; /**< Mask of source port. */
- uint8_t proto; /**< L4 protocol. */
- uint8_t proto_mask; /**< Mask of L4 protocol. */
- };
-
- /**
- * Flow stats
- *
- * For the count action, stats can be returned by the query API.
- *
- * Storage for stats is provided by the application.
- *
- *
- */
- struct rte_flow_classify_stats {
- void *stats;
- };
-
- struct rte_flow_classify_5tuple_stats {
- /** count of packets that match IPv4 5tuple pattern */
- uint64_t counter1;
- /** IPv4 5tuple data */
- struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
- };
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index d89cd3edb63c..c04847bfa148 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -43,7 +43,6 @@ Programmer's Guide
lpm6_lib
fib_lib
rib_lib
- flow_classify_lib
packet_distrib_lib
reorder_lib
ip_fragment_reassembly_lib
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 494b401cda4b..ce5a8f0361cb 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -35,7 +35,7 @@ Deprecation Notices
which also added support for standard atomics
(Ref: https://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html)
-* build: Enabling deprecated libraries (``flow_classify``, ``kni``)
+* build: Enabling deprecated libraries (``kni``)
won't be possible anymore through the use of the ``disable_libs`` build option.
A new build option for deprecated libraries will be introduced instead.
@@ -200,12 +200,6 @@ Deprecation Notices
Since these functions are not called directly by the application,
the API remains unaffected.
-* flow_classify: The flow_classify library and example have no maintainer.
- The library is experimental and, as such, it could be removed from DPDK.
- Its removal has been postponed to let potential users report interest
- in maintaining it.
- In the absence of such interest, this library will be removed in DPDK 23.11.
-
* pipeline: The pipeline library legacy API (functions rte_pipeline_*)
will be deprecated and subsequently removed in DPDK 24.11 release.
Before this, the new pipeline library API (functions rte_swx_pipeline_*)
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 6b4dd21fd0e1..9d96dbdcd302 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -68,6 +68,8 @@ Removed Items
Also, make sure to start the actual text at the margin.
=======================================================
+* flow_classify: Removed flow classification library and examples.
+
API Changes
-----------
diff --git a/doc/guides/sample_app_ug/flow_classify.rst b/doc/guides/sample_app_ug/flow_classify.rst
deleted file mode 100644
index 6c4c04e935e4..000000000000
--- a/doc/guides/sample_app_ug/flow_classify.rst
+++ /dev/null
@@ -1,242 +0,0 @@
-.. SPDX-License-Identifier: BSD-3-Clause
- Copyright(c) 2017 Intel Corporation.
-
-Flow Classify Sample Application
-================================
-
-The Flow Classify sample application is based on the simple *skeleton* example
-of a forwarding application.
-
-It is intended as a demonstration of the basic components of a DPDK forwarding
-application which uses the Flow Classify library API's.
-
-Please refer to the
-:doc:`../prog_guide/flow_classify_lib`
-for more information.
-
-Compiling the Application
--------------------------
-
-To compile the sample application see :doc:`compiling`.
-
-The application is located in the ``flow_classify`` sub-directory.
-
-Running the Application
------------------------
-
-To run the example in a ``linux`` environment:
-
-.. code-block:: console
-
- ./<build_dir>/examples/dpdk-flow_classify -c 4 -n 4 -- /
- --rule_ipv4="../ipv4_rules_file.txt"
-
-Please refer to the *DPDK Getting Started Guide*, section
-:doc:`../linux_gsg/build_sample_apps`
-for general information on running applications and the Environment Abstraction
-Layer (EAL) options.
-
-
-Sample ipv4_rules_file.txt
---------------------------
-
-.. code-block:: console
-
- #file format:
- #src_ip/masklen dst_ip/masklen src_port : mask dst_port : mask proto/mask priority
- #
- 2.2.2.3/24 2.2.2.7/24 32 : 0xffff 33 : 0xffff 17/0xff 0
- 9.9.9.3/24 9.9.9.7/24 32 : 0xffff 33 : 0xffff 17/0xff 1
- 9.9.9.3/24 9.9.9.7/24 32 : 0xffff 33 : 0xffff 6/0xff 2
- 9.9.8.3/24 9.9.8.7/24 32 : 0xffff 33 : 0xffff 6/0xff 3
- 6.7.8.9/24 2.3.4.5/24 32 : 0x0000 33 : 0x0000 132/0xff 4
-
-Explanation
------------
-
-The following sections provide an explanation of the main components of the
-code.
-
-All DPDK library functions used in the sample code are prefixed with ``rte_``
-and are explained in detail in the *DPDK API Documentation*.
-
-ACL field definitions for the IPv4 5 tuple rule
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The following field definitions are used when creating the ACL table during
-initialisation of the ``Flow Classify`` application
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
- :language: c
- :start-after: Creation of ACL table during initialization of application. 8<
- :end-before: >8 End of creation of ACL table.
-
-The Main Function
-~~~~~~~~~~~~~~~~~
-
-The ``main()`` function performs the initialization and calls the execution
-threads for each lcore.
-
-The first task is to initialize the Environment Abstraction Layer (EAL).
-The ``argc`` and ``argv`` arguments are provided to the ``rte_eal_init()``
-function. The value returned is the number of parsed arguments:
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
- :language: c
- :start-after: Initialize the Environment Abstraction Layer (EAL). 8<
- :end-before: >8 End of initialization of EAL.
- :dedent: 1
-
-It then parses the flow_classify application arguments
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
- :language: c
- :start-after: Parse application arguments (after the EAL ones). 8<
- :end-before: >8 End of parse application arguments.
- :dedent: 1
-
-The ``main()`` function also allocates a mempool to hold the mbufs
-(Message Buffers) used by the application:
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
- :language: c
- :start-after: Creates a new mempool in memory to hold the mbufs. 8<
- :end-before: >8 End of creation of new mempool in memory.
- :dedent: 1
-
-mbufs are the packet buffer structure used by DPDK. They are explained in
-detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.
-
-The ``main()`` function also initializes all the ports using the user defined
-``port_init()`` function which is explained in the next section:
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
- :language: c
- :start-after: Initialize all ports. 8<
- :end-before: >8 End of initialization of all ports.
- :dedent: 1
-
-The ``main()`` function creates the ``flow classifier object`` and adds an ``ACL
-table`` to the flow classifier.
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
- :language: c
- :start-after: Creation of flow classifier object. 8<
- :end-before: >8 End of creation of flow classifier object.
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
- :language: c
- :start-after: Memory allocation. 8<
- :end-before: >8 End of initialization of table create params.
- :dedent: 1
-
-It then reads the ipv4_rules_file.txt file and initialises the parameters for
-the ``rte_flow_classify_table_entry_add`` API.
-This API adds a rule to the ACL table.
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
- :language: c
- :start-after: Read file of IPv4 tuple rules. 8<
- :end-before: >8 End of reading file of IPv4 5 tuple rules.
- :dedent: 1
-
-Once the initialization is complete, the application is ready to launch a
-function on an lcore. In this example ``lcore_main()`` is called on a single
-lcore.
-
-.. code-block:: c
-
- lcore_main(cls_app);
-
-The ``lcore_main()`` function is explained below.
-
-The Port Initialization Function
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The main functional part of the port initialization used in the Basic
-Forwarding application is shown below:
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
- :language: c
- :start-after: Initializing port using global settings. 8<
- :end-before: >8 End of initializing a given port.
-
-The Ethernet ports are configured with default settings using the
-``rte_eth_dev_configure()`` function.
-
-For this example the ports are set up with 1 RX and 1 TX queue using the
-``rte_eth_rx_queue_setup()`` and ``rte_eth_tx_queue_setup()`` functions.
-
-The Ethernet port is then started:
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
- :language: c
- :start-after: Start the Ethernet port. 8<
- :end-before: >8 End of starting the Ethernet port.
- :dedent: 1
-
-
-Finally the RX port is set in promiscuous mode:
-
-.. code-block:: c
-
- retval = rte_eth_promiscuous_enable(port);
-
-The Add Rules function
-~~~~~~~~~~~~~~~~~~~~~~
-
-The ``add_rules`` function reads the ``ipv4_rules_file.txt`` file and calls the
-``add_classify_rule`` function which calls the
-``rte_flow_classify_table_entry_add`` API.
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
- :language: c
- :start-after: Reads file and calls the add_classify_rule function. 8<
- :end-before: >8 End of add_rules.
-
-
-The Lcore Main function
-~~~~~~~~~~~~~~~~~~~~~~~
-
-As we saw above the ``main()`` function calls an application function on the
-available lcores.
-The ``lcore_main`` function calls the ``rte_flow_classifier_query`` API.
-For the Basic Forwarding application the ``lcore_main`` function looks like the
-following:
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
- :language: c
- :start-after: Flow classify data. 8<
- :end-before: >8 End of flow classify data.
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
- :language: c
- :start-after: Classifying the packets. 8<
- :end-before: >8 End of lcore main.
-
-The main work of the application is done within the loop:
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
- :language: c
- :start-after: Run until the application is quit or killed. 8<
- :end-before: >8 End of main loop.
- :dedent: 1
-
-Packets are received in bursts on the RX ports and transmitted in bursts on
-the TX ports. The ports are grouped in pairs with a simple mapping scheme
-using the an XOR on the port number::
-
- 0 -> 1
- 1 -> 0
-
- 2 -> 3
- 3 -> 2
-
- etc.
-
-The ``rte_eth_tx_burst()`` function frees the memory buffers of packets that
-are transmitted. If packets fail to transmit, ``(nb_tx < nb_rx)``, then they
-must be freed explicitly using ``rte_pktmbuf_free()``.
-
-The forwarding loop can be interrupted and the application closed using
-``Ctrl-C``.
diff --git a/doc/guides/sample_app_ug/index.rst b/doc/guides/sample_app_ug/index.rst
index 6e1e83d7d7c8..19485556c765 100644
--- a/doc/guides/sample_app_ug/index.rst
+++ b/doc/guides/sample_app_ug/index.rst
@@ -15,7 +15,6 @@ Sample Applications User Guides
hello_world
skeleton
rxtx_callbacks
- flow_classify
flow_filtering
ip_frag
ipv4_multicast
diff --git a/examples/flow_classify/Makefile b/examples/flow_classify/Makefile
deleted file mode 100644
index 539bf9682b06..000000000000
--- a/examples/flow_classify/Makefile
+++ /dev/null
@@ -1,51 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
-
-# binary name
-APP = flow_classify
-
-# all source are stored in SRCS-y
-SRCS-y := flow_classify.c
-
-PKGCONF ?= pkg-config
-
-# Build using pkg-config variables if possible
-ifneq ($(shell $(PKGCONF) --exists libdpdk && echo 0),0)
-$(error "no installation of DPDK found")
-endif
-
-all: shared
-.PHONY: shared static
-shared: build/$(APP)-shared
- ln -sf $(APP)-shared build/$(APP)
-static: build/$(APP)-static
- ln -sf $(APP)-static build/$(APP)
-
-PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null)
-CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk)
-LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk)
-LDFLAGS_STATIC = $(shell $(PKGCONF) --static --libs libdpdk)
-
-ifeq ($(MAKECMDGOALS),static)
-# check for broken pkg-config
-ifeq ($(shell echo $(LDFLAGS_STATIC) | grep 'whole-archive.*l:lib.*no-whole-archive'),)
-$(warning "pkg-config output list does not contain drivers between 'whole-archive'/'no-whole-archive' flags.")
-$(error "Cannot generate statically-linked binaries with this version of pkg-config")
-endif
-endif
-
-CFLAGS += -DALLOW_EXPERIMENTAL_API
-
-build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
- $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
-
-build/$(APP)-static: $(SRCS-y) Makefile $(PC_FILE) | build
- $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_STATIC)
-
-build:
- @mkdir -p $@
-
-.PHONY: clean
-clean:
- rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared
- test -d build && rmdir -p build || true
diff --git a/examples/flow_classify/flow_classify.c b/examples/flow_classify/flow_classify.c
deleted file mode 100644
index cdd51b247628..000000000000
--- a/examples/flow_classify/flow_classify.c
+++ /dev/null
@@ -1,878 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
- */
-
-#include <stdint.h>
-#include <inttypes.h>
-#include <getopt.h>
-
-#include <rte_eal.h>
-#include <rte_ethdev.h>
-#include <rte_cycles.h>
-#include <rte_lcore.h>
-#include <rte_mbuf.h>
-#include <rte_flow.h>
-#include <rte_flow_classify.h>
-#include <rte_table_acl.h>
-
-#define RX_RING_SIZE 1024
-#define TX_RING_SIZE 1024
-
-#define NUM_MBUFS 8191
-#define MBUF_CACHE_SIZE 250
-#define BURST_SIZE 32
-
-#define MAX_NUM_CLASSIFY 30
-#define FLOW_CLASSIFY_MAX_RULE_NUM 91
-#define FLOW_CLASSIFY_MAX_PRIORITY 8
-#define FLOW_CLASSIFIER_NAME_SIZE 64
-
-#define COMMENT_LEAD_CHAR ('#')
-#define OPTION_RULE_IPV4 "rule_ipv4"
-#define RTE_LOGTYPE_FLOW_CLASSIFY RTE_LOGTYPE_USER3
-#define flow_classify_log(format, ...) \
- RTE_LOG(ERR, FLOW_CLASSIFY, format, ##__VA_ARGS__)
-
-#define uint32_t_to_char(ip, a, b, c, d) do {\
- *a = (unsigned char)(ip >> 24 & 0xff);\
- *b = (unsigned char)(ip >> 16 & 0xff);\
- *c = (unsigned char)(ip >> 8 & 0xff);\
- *d = (unsigned char)(ip & 0xff);\
- } while (0)
-
-enum {
- CB_FLD_SRC_ADDR,
- CB_FLD_DST_ADDR,
- CB_FLD_SRC_PORT,
- CB_FLD_SRC_PORT_DLM,
- CB_FLD_SRC_PORT_MASK,
- CB_FLD_DST_PORT,
- CB_FLD_DST_PORT_DLM,
- CB_FLD_DST_PORT_MASK,
- CB_FLD_PROTO,
- CB_FLD_PRIORITY,
- CB_FLD_NUM,
-};
-
-static struct{
- const char *rule_ipv4_name;
-} parm_config;
-const char cb_port_delim[] = ":";
-
-/* Creation of flow classifier object. 8< */
-struct flow_classifier {
- struct rte_flow_classifier *cls;
-};
-
-struct flow_classifier_acl {
- struct flow_classifier cls;
-} __rte_cache_aligned;
-/* >8 End of creation of flow classifier object. */
-
-/* Creation of ACL table during initialization of application. 8< */
-
-/* ACL field definitions for IPv4 5 tuple rule */
-enum {
- PROTO_FIELD_IPV4,
- SRC_FIELD_IPV4,
- DST_FIELD_IPV4,
- SRCP_FIELD_IPV4,
- DSTP_FIELD_IPV4,
- NUM_FIELDS_IPV4
-};
-
-enum {
- PROTO_INPUT_IPV4,
- SRC_INPUT_IPV4,
- DST_INPUT_IPV4,
- SRCP_DESTP_INPUT_IPV4
-};
-
-static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
- /* first input field - always one byte long. */
- {
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint8_t),
- .field_index = PROTO_FIELD_IPV4,
- .input_index = PROTO_INPUT_IPV4,
- .offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct rte_ipv4_hdr, next_proto_id),
- },
- /* next input field (IPv4 source address) - 4 consecutive bytes. */
- {
- /* rte_flow uses a bit mask for IPv4 addresses */
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint32_t),
- .field_index = SRC_FIELD_IPV4,
- .input_index = SRC_INPUT_IPV4,
- .offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct rte_ipv4_hdr, src_addr),
- },
- /* next input field (IPv4 destination address) - 4 consecutive bytes. */
- {
- /* rte_flow uses a bit mask for IPv4 addresses */
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint32_t),
- .field_index = DST_FIELD_IPV4,
- .input_index = DST_INPUT_IPV4,
- .offset = sizeof(struct rte_ether_hdr) +
- offsetof(struct rte_ipv4_hdr, dst_addr),
- },
- /*
- * Next 2 fields (src & dst ports) form 4 consecutive bytes.
- * They share the same input index.
- */
- {
- /* rte_flow uses a bit mask for protocol ports */
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint16_t),
- .field_index = SRCP_FIELD_IPV4,
- .input_index = SRCP_DESTP_INPUT_IPV4,
- .offset = sizeof(struct rte_ether_hdr) +
- sizeof(struct rte_ipv4_hdr) +
- offsetof(struct rte_tcp_hdr, src_port),
- },
- {
- /* rte_flow uses a bit mask for protocol ports */
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint16_t),
- .field_index = DSTP_FIELD_IPV4,
- .input_index = SRCP_DESTP_INPUT_IPV4,
- .offset = sizeof(struct rte_ether_hdr) +
- sizeof(struct rte_ipv4_hdr) +
- offsetof(struct rte_tcp_hdr, dst_port),
- },
-};
-/* >8 End of creation of ACL table. */
-
-/* Flow classify data. 8< */
-static int num_classify_rules;
-static struct rte_flow_classify_rule *rules[MAX_NUM_CLASSIFY];
-static struct rte_flow_classify_ipv4_5tuple_stats ntuple_stats;
-static struct rte_flow_classify_stats classify_stats = {
- .stats = (void **)&ntuple_stats
-};
-/* >8 End of flow classify data. */
-
-/* parameters for rte_flow_classify_validate and
- * rte_flow_classify_table_entry_add functions
- */
-
-static struct rte_flow_item eth_item = { RTE_FLOW_ITEM_TYPE_ETH,
- 0, 0, 0 };
-static struct rte_flow_item end_item = { RTE_FLOW_ITEM_TYPE_END,
- 0, 0, 0 };
-
-/* sample actions:
- * "actions count / end"
- */
-struct rte_flow_query_count count = {
- .reset = 1,
- .hits_set = 1,
- .bytes_set = 1,
- .hits = 0,
- .bytes = 0,
-};
-static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT,
- &count};
-static struct rte_flow_action end_action = { RTE_FLOW_ACTION_TYPE_END, 0};
-static struct rte_flow_action actions[2];
-
-/* sample attributes */
-static struct rte_flow_attr attr;
-
-/* flow_classify.c: * Based on DPDK skeleton forwarding example. */
-
-/*
- * Initializes a given port using global settings and with the RX buffers
- * coming from the mbuf_pool passed as a parameter.
- */
-
-/* Initializing port using global settings. 8< */
-static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
-{
- struct rte_eth_conf port_conf;
- struct rte_ether_addr addr;
- const uint16_t rx_rings = 1, tx_rings = 1;
- int retval;
- uint16_t q;
- struct rte_eth_dev_info dev_info;
- struct rte_eth_txconf txconf;
-
- if (!rte_eth_dev_is_valid_port(port))
- return -1;
-
- memset(&port_conf, 0, sizeof(struct rte_eth_conf));
-
- retval = rte_eth_dev_info_get(port, &dev_info);
- if (retval != 0) {
- printf("Error during getting device (port %u) info: %s\n",
- port, strerror(-retval));
- return retval;
- }
-
- if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
- port_conf.txmode.offloads |=
- RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
-
- /* Configure the Ethernet device. */
- retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
- if (retval != 0)
- return retval;
-
- /* Allocate and set up 1 RX queue per Ethernet port. */
- for (q = 0; q < rx_rings; q++) {
- retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
- rte_eth_dev_socket_id(port), NULL, mbuf_pool);
- if (retval < 0)
- return retval;
- }
-
- txconf = dev_info.default_txconf;
- txconf.offloads = port_conf.txmode.offloads;
- /* Allocate and set up 1 TX queue per Ethernet port. */
- for (q = 0; q < tx_rings; q++) {
- retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
- rte_eth_dev_socket_id(port), &txconf);
- if (retval < 0)
- return retval;
- }
-
- /* Start the Ethernet port. 8< */
- retval = rte_eth_dev_start(port);
- /* >8 End of starting the Ethernet port. */
- if (retval < 0)
- return retval;
-
- /* Display the port MAC address. */
- retval = rte_eth_macaddr_get(port, &addr);
- if (retval != 0)
- return retval;
-
- printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
- " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
- port, RTE_ETHER_ADDR_BYTES(&addr));
-
- /* Enable RX in promiscuous mode for the Ethernet device. */
- retval = rte_eth_promiscuous_enable(port);
- if (retval != 0)
- return retval;
-
- return 0;
-}
-/* >8 End of initializing a given port. */
-
-/*
- * The lcore main. This is the main thread that does the work, reading from
- * an input port classifying the packets and writing to an output port.
- */
-
-/* Classifying the packets. 8< */
-static __rte_noreturn void
-lcore_main(struct flow_classifier *cls_app)
-{
- uint16_t port;
- int ret;
- int i = 0;
-
- ret = rte_flow_classify_table_entry_delete(cls_app->cls,
- rules[7]);
- if (ret)
- printf("table_entry_delete failed [7] %d\n\n", ret);
- else
- printf("table_entry_delete succeeded [7]\n\n");
-
- /*
- * Check that the port is on the same NUMA node as the polling thread
- * for best performance.
- */
- RTE_ETH_FOREACH_DEV(port)
- if (rte_eth_dev_socket_id(port) >= 0 &&
- rte_eth_dev_socket_id(port) != (int)rte_socket_id()) {
- printf("\n\n");
- printf("WARNING: port %u is on remote NUMA node\n",
- port);
- printf("to polling thread.\n");
- printf("Performance will not be optimal.\n");
- }
- printf("\nCore %u forwarding packets. ", rte_lcore_id());
- printf("[Ctrl+C to quit]\n");
-
- /* Run until the application is quit or killed. 8< */
- for (;;) {
- /*
- * Receive packets on a port, classify them and forward them
- * on the paired port.
- * The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
- */
- RTE_ETH_FOREACH_DEV(port) {
- /* Get burst of RX packets, from first port of pair. */
- struct rte_mbuf *bufs[BURST_SIZE];
- const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
- bufs, BURST_SIZE);
-
- if (unlikely(nb_rx == 0))
- continue;
-
- for (i = 0; i < MAX_NUM_CLASSIFY; i++) {
- if (rules[i]) {
- ret = rte_flow_classifier_query(
- cls_app->cls,
- bufs, nb_rx, rules[i],
- &classify_stats);
- if (ret)
- printf(
- "rule [%d] query failed ret [%d]\n\n",
- i, ret);
- else {
- printf(
- "rule[%d] count=%"PRIu64"\n",
- i, ntuple_stats.counter1);
-
- printf("proto = %d\n",
- ntuple_stats.ipv4_5tuple.proto);
- }
- }
- }
-
- /* Send burst of TX packets, to second port of pair. */
- const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
- bufs, nb_rx);
-
- /* Free any unsent packets. */
- if (unlikely(nb_tx < nb_rx)) {
- uint16_t buf;
-
- for (buf = nb_tx; buf < nb_rx; buf++)
- rte_pktmbuf_free(bufs[buf]);
- }
- }
- }
- /* >8 End of main loop. */
-}
-/* >8 End of lcore main. */
-
-/*
- * Parse IPv4 5 tuple rules file, ipv4_rules_file.txt.
- * Expected format:
- * <src_ipv4_addr>'/'<masklen> <space> \
- * <dst_ipv4_addr>'/'<masklen> <space> \
- * <src_port> <space> ":" <src_port_mask> <space> \
- * <dst_port> <space> ":" <dst_port_mask> <space> \
- * <proto>'/'<proto_mask> <space> \
- * <priority>
- */
-
-static int
-get_cb_field(char **in, uint32_t *fd, int base, unsigned long lim,
- char dlm)
-{
- unsigned long val;
- char *end;
-
- errno = 0;
- val = strtoul(*in, &end, base);
- if (errno != 0 || end[0] != dlm || val > lim)
- return -EINVAL;
- *fd = (uint32_t)val;
- *in = end + 1;
- return 0;
-}
-
-static int
-parse_ipv4_net(char *in, uint32_t *addr, uint32_t *mask_len)
-{
- uint32_t a, b, c, d, m;
-
- if (get_cb_field(&in, &a, 0, UINT8_MAX, '.'))
- return -EINVAL;
- if (get_cb_field(&in, &b, 0, UINT8_MAX, '.'))
- return -EINVAL;
- if (get_cb_field(&in, &c, 0, UINT8_MAX, '.'))
- return -EINVAL;
- if (get_cb_field(&in, &d, 0, UINT8_MAX, '/'))
- return -EINVAL;
- if (get_cb_field(&in, &m, 0, sizeof(uint32_t) * CHAR_BIT, 0))
- return -EINVAL;
-
- addr[0] = RTE_IPV4(a, b, c, d);
- mask_len[0] = m;
- return 0;
-}
-
-static int
-parse_ipv4_5tuple_rule(char *str, struct rte_eth_ntuple_filter *ntuple_filter)
-{
- int i, ret;
- char *s, *sp, *in[CB_FLD_NUM];
- static const char *dlm = " \t\n";
- int dim = CB_FLD_NUM;
- uint32_t temp;
-
- s = str;
- for (i = 0; i != dim; i++, s = NULL) {
- in[i] = strtok_r(s, dlm, &sp);
- if (in[i] == NULL)
- return -EINVAL;
- }
-
- ret = parse_ipv4_net(in[CB_FLD_SRC_ADDR],
- &ntuple_filter->src_ip,
- &ntuple_filter->src_ip_mask);
- if (ret != 0) {
- flow_classify_log("failed to read source address/mask: %s\n",
- in[CB_FLD_SRC_ADDR]);
- return ret;
- }
-
- ret = parse_ipv4_net(in[CB_FLD_DST_ADDR],
- &ntuple_filter->dst_ip,
- &ntuple_filter->dst_ip_mask);
- if (ret != 0) {
- flow_classify_log("failed to read destination address/mask: %s\n",
- in[CB_FLD_DST_ADDR]);
- return ret;
- }
-
- if (get_cb_field(&in[CB_FLD_SRC_PORT], &temp, 0, UINT16_MAX, 0))
- return -EINVAL;
- ntuple_filter->src_port = (uint16_t)temp;
-
- if (strncmp(in[CB_FLD_SRC_PORT_DLM], cb_port_delim,
- sizeof(cb_port_delim)) != 0)
- return -EINVAL;
-
- if (get_cb_field(&in[CB_FLD_SRC_PORT_MASK], &temp, 0, UINT16_MAX, 0))
- return -EINVAL;
- ntuple_filter->src_port_mask = (uint16_t)temp;
-
- if (get_cb_field(&in[CB_FLD_DST_PORT], &temp, 0, UINT16_MAX, 0))
- return -EINVAL;
- ntuple_filter->dst_port = (uint16_t)temp;
-
- if (strncmp(in[CB_FLD_DST_PORT_DLM], cb_port_delim,
- sizeof(cb_port_delim)) != 0)
- return -EINVAL;
-
- if (get_cb_field(&in[CB_FLD_DST_PORT_MASK], &temp, 0, UINT16_MAX, 0))
- return -EINVAL;
- ntuple_filter->dst_port_mask = (uint16_t)temp;
-
- if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, '/'))
- return -EINVAL;
- ntuple_filter->proto = (uint8_t)temp;
-
- if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, 0))
- return -EINVAL;
- ntuple_filter->proto_mask = (uint8_t)temp;
-
- if (get_cb_field(&in[CB_FLD_PRIORITY], &temp, 0, UINT16_MAX, 0))
- return -EINVAL;
- ntuple_filter->priority = (uint16_t)temp;
- if (ntuple_filter->priority > FLOW_CLASSIFY_MAX_PRIORITY)
- ret = -EINVAL;
-
- return ret;
-}
-
-/* Bypass comment and empty lines */
-static inline int
-is_bypass_line(char *buff)
-{
- int i = 0;
-
- /* comment line */
- if (buff[0] == COMMENT_LEAD_CHAR)
- return 1;
- /* empty line */
- while (buff[i] != '\0') {
- if (!isspace(buff[i]))
- return 0;
- i++;
- }
- return 1;
-}
-
-static uint32_t
-convert_depth_to_bitmask(uint32_t depth_val)
-{
- uint32_t bitmask = 0;
- int i, j;
-
- for (i = depth_val, j = 0; i > 0; i--, j++)
- bitmask |= (1 << (31 - j));
- return bitmask;
-}
-
-static int
-add_classify_rule(struct rte_eth_ntuple_filter *ntuple_filter,
- struct flow_classifier *cls_app)
-{
- int ret = -1;
- int key_found;
- struct rte_flow_error error;
- struct rte_flow_item_ipv4 ipv4_spec;
- struct rte_flow_item_ipv4 ipv4_mask;
- struct rte_flow_item ipv4_udp_item;
- struct rte_flow_item ipv4_tcp_item;
- struct rte_flow_item ipv4_sctp_item;
- struct rte_flow_item_udp udp_spec;
- struct rte_flow_item_udp udp_mask;
- struct rte_flow_item udp_item;
- struct rte_flow_item_tcp tcp_spec;
- struct rte_flow_item_tcp tcp_mask;
- struct rte_flow_item tcp_item;
- struct rte_flow_item_sctp sctp_spec;
- struct rte_flow_item_sctp sctp_mask;
- struct rte_flow_item sctp_item;
- struct rte_flow_item pattern_ipv4_5tuple[4];
- struct rte_flow_classify_rule *rule;
- uint8_t ipv4_proto;
-
- if (num_classify_rules >= MAX_NUM_CLASSIFY) {
- printf(
- "\nINFO: classify rule capacity %d reached\n",
- num_classify_rules);
- return ret;
- }
-
- /* set up parameters for validate and add */
- memset(&ipv4_spec, 0, sizeof(ipv4_spec));
- ipv4_spec.hdr.next_proto_id = ntuple_filter->proto;
- ipv4_spec.hdr.src_addr = ntuple_filter->src_ip;
- ipv4_spec.hdr.dst_addr = ntuple_filter->dst_ip;
- ipv4_proto = ipv4_spec.hdr.next_proto_id;
-
- memset(&ipv4_mask, 0, sizeof(ipv4_mask));
- ipv4_mask.hdr.next_proto_id = ntuple_filter->proto_mask;
- ipv4_mask.hdr.src_addr = ntuple_filter->src_ip_mask;
- ipv4_mask.hdr.src_addr =
- convert_depth_to_bitmask(ipv4_mask.hdr.src_addr);
- ipv4_mask.hdr.dst_addr = ntuple_filter->dst_ip_mask;
- ipv4_mask.hdr.dst_addr =
- convert_depth_to_bitmask(ipv4_mask.hdr.dst_addr);
-
- switch (ipv4_proto) {
- case IPPROTO_UDP:
- ipv4_udp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
- ipv4_udp_item.spec = &ipv4_spec;
- ipv4_udp_item.mask = &ipv4_mask;
- ipv4_udp_item.last = NULL;
-
- udp_spec.hdr.src_port = ntuple_filter->src_port;
- udp_spec.hdr.dst_port = ntuple_filter->dst_port;
- udp_spec.hdr.dgram_len = 0;
- udp_spec.hdr.dgram_cksum = 0;
-
- udp_mask.hdr.src_port = ntuple_filter->src_port_mask;
- udp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
- udp_mask.hdr.dgram_len = 0;
- udp_mask.hdr.dgram_cksum = 0;
-
- udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
- udp_item.spec = &udp_spec;
- udp_item.mask = &udp_mask;
- udp_item.last = NULL;
-
- attr.priority = ntuple_filter->priority;
- pattern_ipv4_5tuple[1] = ipv4_udp_item;
- pattern_ipv4_5tuple[2] = udp_item;
- break;
- case IPPROTO_TCP:
- ipv4_tcp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
- ipv4_tcp_item.spec = &ipv4_spec;
- ipv4_tcp_item.mask = &ipv4_mask;
- ipv4_tcp_item.last = NULL;
-
- memset(&tcp_spec, 0, sizeof(tcp_spec));
- tcp_spec.hdr.src_port = ntuple_filter->src_port;
- tcp_spec.hdr.dst_port = ntuple_filter->dst_port;
-
- memset(&tcp_mask, 0, sizeof(tcp_mask));
- tcp_mask.hdr.src_port = ntuple_filter->src_port_mask;
- tcp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
-
- tcp_item.type = RTE_FLOW_ITEM_TYPE_TCP;
- tcp_item.spec = &tcp_spec;
- tcp_item.mask = &tcp_mask;
- tcp_item.last = NULL;
-
- attr.priority = ntuple_filter->priority;
- pattern_ipv4_5tuple[1] = ipv4_tcp_item;
- pattern_ipv4_5tuple[2] = tcp_item;
- break;
- case IPPROTO_SCTP:
- ipv4_sctp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
- ipv4_sctp_item.spec = &ipv4_spec;
- ipv4_sctp_item.mask = &ipv4_mask;
- ipv4_sctp_item.last = NULL;
-
- sctp_spec.hdr.src_port = ntuple_filter->src_port;
- sctp_spec.hdr.dst_port = ntuple_filter->dst_port;
- sctp_spec.hdr.cksum = 0;
- sctp_spec.hdr.tag = 0;
-
- sctp_mask.hdr.src_port = ntuple_filter->src_port_mask;
- sctp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
- sctp_mask.hdr.cksum = 0;
- sctp_mask.hdr.tag = 0;
-
- sctp_item.type = RTE_FLOW_ITEM_TYPE_SCTP;
- sctp_item.spec = &sctp_spec;
- sctp_item.mask = &sctp_mask;
- sctp_item.last = NULL;
-
- attr.priority = ntuple_filter->priority;
- pattern_ipv4_5tuple[1] = ipv4_sctp_item;
- pattern_ipv4_5tuple[2] = sctp_item;
- break;
- default:
- return ret;
- }
-
- attr.ingress = 1;
- pattern_ipv4_5tuple[0] = eth_item;
- pattern_ipv4_5tuple[3] = end_item;
- actions[0] = count_action;
- actions[1] = end_action;
-
- /* Validate and add rule */
- ret = rte_flow_classify_validate(cls_app->cls, &attr,
- pattern_ipv4_5tuple, actions, &error);
- if (ret) {
- printf("table entry validate failed ipv4_proto = %u\n",
- ipv4_proto);
- return ret;
- }
-
- rule = rte_flow_classify_table_entry_add(
- cls_app->cls, &attr, pattern_ipv4_5tuple,
- actions, &key_found, &error);
- if (rule == NULL) {
- printf("table entry add failed ipv4_proto = %u\n",
- ipv4_proto);
- ret = -1;
- return ret;
- }
-
- rules[num_classify_rules] = rule;
- num_classify_rules++;
- return 0;
-}
-
-/* Reads file and calls the add_classify_rule function. 8< */
-static int
-add_rules(const char *rule_path, struct flow_classifier *cls_app)
-{
- FILE *fh;
- char buff[LINE_MAX];
- unsigned int i = 0;
- unsigned int total_num = 0;
- struct rte_eth_ntuple_filter ntuple_filter;
- int ret;
-
- fh = fopen(rule_path, "rb");
- if (fh == NULL)
- rte_exit(EXIT_FAILURE, "%s: fopen %s failed\n", __func__,
- rule_path);
-
- ret = fseek(fh, 0, SEEK_SET);
- if (ret)
- rte_exit(EXIT_FAILURE, "%s: fseek %d failed\n", __func__,
- ret);
-
- i = 0;
- while (fgets(buff, LINE_MAX, fh) != NULL) {
- i++;
-
- if (is_bypass_line(buff))
- continue;
-
- if (total_num >= FLOW_CLASSIFY_MAX_RULE_NUM - 1) {
- printf("\nINFO: classify rule capacity %d reached\n",
- total_num);
- break;
- }
-
- if (parse_ipv4_5tuple_rule(buff, &ntuple_filter) != 0)
- rte_exit(EXIT_FAILURE,
- "%s Line %u: parse rules error\n",
- rule_path, i);
-
- if (add_classify_rule(&ntuple_filter, cls_app) != 0)
- rte_exit(EXIT_FAILURE, "add rule error\n");
-
- total_num++;
- }
-
- fclose(fh);
- return 0;
-}
-/* >8 End of add_rules. */
-
-/* display usage */
-static void
-print_usage(const char *prgname)
-{
- printf("%s usage:\n", prgname);
- printf("[EAL options] -- --"OPTION_RULE_IPV4"=FILE: ");
- printf("specify the ipv4 rules file.\n");
- printf("Each rule occupies one line in the file.\n");
-}
-
-/* Parse the argument given in the command line of the application */
-static int
-parse_args(int argc, char **argv)
-{
- int opt, ret;
- char **argvopt;
- int option_index;
- char *prgname = argv[0];
- static struct option lgopts[] = {
- {OPTION_RULE_IPV4, 1, 0, 0},
- {NULL, 0, 0, 0}
- };
-
- argvopt = argv;
-
- while ((opt = getopt_long(argc, argvopt, "",
- lgopts, &option_index)) != EOF) {
-
- switch (opt) {
- /* long options */
- case 0:
- if (!strncmp(lgopts[option_index].name,
- OPTION_RULE_IPV4,
- sizeof(OPTION_RULE_IPV4)))
- parm_config.rule_ipv4_name = optarg;
- break;
- default:
- print_usage(prgname);
- return -1;
- }
- }
-
- if (optind >= 0)
- argv[optind-1] = prgname;
-
- ret = optind-1;
- optind = 1; /* reset getopt lib */
- return ret;
-}
-
-/*
- * The main function, which does initialization and calls the lcore_main
- * function.
- */
-int
-main(int argc, char *argv[])
-{
- struct rte_mempool *mbuf_pool;
- uint16_t nb_ports;
- uint16_t portid;
- int ret;
- int socket_id;
- struct rte_table_acl_params table_acl_params;
- struct rte_flow_classify_table_params cls_table_params;
- struct flow_classifier *cls_app;
- struct rte_flow_classifier_params cls_params;
- uint32_t size;
-
- /* Initialize the Environment Abstraction Layer (EAL). 8< */
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
- /* >8 End of initialization of EAL. */
-
- argc -= ret;
- argv += ret;
-
- /* Parse application arguments (after the EAL ones). 8< */
- ret = parse_args(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid flow_classify parameters\n");
- /* >8 End of parse application arguments. */
-
- /* Check that there is an even number of ports to send/receive on. */
- nb_ports = rte_eth_dev_count_avail();
- if (nb_ports < 2 || (nb_ports & 1))
- rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");
-
- /* Creates a new mempool in memory to hold the mbufs. 8< */
- mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
- MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
- /* >8 End of creation of new mempool in memory. */
-
- if (mbuf_pool == NULL)
- rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
-
- /* Initialize all ports. 8< */
- RTE_ETH_FOREACH_DEV(portid)
- if (port_init(portid, mbuf_pool) != 0)
- rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
- portid);
- /* >8 End of initialization of all ports. */
-
- if (rte_lcore_count() > 1)
- printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");
-
- socket_id = rte_eth_dev_socket_id(0);
- if (socket_id == SOCKET_ID_ANY)
- socket_id = rte_lcore_to_socket_id(rte_get_next_lcore(-1, 0, 0));
-
- /* Memory allocation. 8< */
- size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
- cls_app = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
- if (cls_app == NULL)
- rte_exit(EXIT_FAILURE, "Cannot allocate classifier memory\n");
-
- cls_params.name = "flow_classifier";
- cls_params.socket_id = socket_id;
-
- cls_app->cls = rte_flow_classifier_create(&cls_params);
- if (cls_app->cls == NULL) {
- rte_free(cls_app);
- rte_exit(EXIT_FAILURE, "Cannot create classifier\n");
- }
-
- /* initialise ACL table params */
- table_acl_params.name = "table_acl_ipv4_5tuple";
- table_acl_params.n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;
- table_acl_params.n_rule_fields = RTE_DIM(ipv4_defs);
- memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));
-
- /* initialise table create params */
- cls_table_params.ops = &rte_table_acl_ops;
- cls_table_params.arg_create = &table_acl_params;
- cls_table_params.type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;
-
- ret = rte_flow_classify_table_create(cls_app->cls, &cls_table_params);
- if (ret) {
- rte_flow_classifier_free(cls_app->cls);
- rte_free(cls_app);
- rte_exit(EXIT_FAILURE, "Failed to create classifier table\n");
- }
- /* >8 End of initialization of table create params. */
-
- /* read file of IPv4 5 tuple rules and initialize parameters
- * for rte_flow_classify_validate and rte_flow_classify_table_entry_add
- * API's.
- */
-
- /* Read file of IPv4 tuple rules. 8< */
- if (add_rules(parm_config.rule_ipv4_name, cls_app)) {
- rte_flow_classifier_free(cls_app->cls);
- rte_free(cls_app);
- rte_exit(EXIT_FAILURE, "Failed to add rules\n");
- }
- /* >8 End of reading file of IPv4 5 tuple rules. */
-
- /* Call lcore_main on the main core only. */
- lcore_main(cls_app);
-
- /* clean up the EAL */
- rte_eal_cleanup();
-
- return 0;
-}
diff --git a/examples/flow_classify/ipv4_rules_file.txt b/examples/flow_classify/ipv4_rules_file.txt
deleted file mode 100644
index cd5215736aaf..000000000000
--- a/examples/flow_classify/ipv4_rules_file.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-#file format:
-#src_ip/masklen dst_ip/masklen src_port : mask dst_port : mask proto/mask priority
-#
-2.2.2.3/24 2.2.2.7/24 32 : 0xffff 33 : 0xffff 17/0xff 0
-9.9.9.3/24 9.9.9.7/24 32 : 0xffff 33 : 0xffff 17/0xff 1
-9.9.9.3/24 9.9.9.7/24 32 : 0xffff 33 : 0xffff 6/0xff 2
-9.9.8.3/24 9.9.8.7/24 32 : 0xffff 33 : 0xffff 6/0xff 3
-6.7.8.9/24 2.3.4.5/24 32 : 0x0000 33 : 0x0000 132/0xff 4
-6.7.8.9/32 192.168.0.36/32 10 : 0xffff 11 : 0xffff 6/0xfe 5
-6.7.8.9/24 192.168.0.36/24 10 : 0xffff 11 : 0xffff 6/0xfe 6
-6.7.8.9/16 192.168.0.36/16 10 : 0xffff 11 : 0xffff 6/0xfe 7
-6.7.8.9/8 192.168.0.36/8 10 : 0xffff 11 : 0xffff 6/0xfe 8
-#error rules
-#9.8.7.6/8 192.168.0.36/8 10 : 0xffff 11 : 0xffff 6/0xfe 9
diff --git a/examples/flow_classify/meson.build b/examples/flow_classify/meson.build
deleted file mode 100644
index 1be1bf037427..000000000000
--- a/examples/flow_classify/meson.build
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
-
-# meson file, for building this example as part of a main DPDK build.
-#
-# To build this example as a standalone application with an already-installed
-# DPDK instance, use 'make'
-
-deps += 'flow_classify'
-allow_experimental_apis = true
-sources = files(
- 'flow_classify.c',
-)
diff --git a/examples/meson.build b/examples/meson.build
index 55ba8847a053..65c8303a9d23 100644
--- a/examples/meson.build
+++ b/examples/meson.build
@@ -16,7 +16,6 @@ all_examples = [
'ethtool',
'eventdev_pipeline',
'fips_validation',
- 'flow_classify',
'flow_filtering',
'helloworld',
'ip_fragmentation',
diff --git a/lib/flow_classify/meson.build b/lib/flow_classify/meson.build
deleted file mode 100644
index 3bb861c68fb5..000000000000
--- a/lib/flow_classify/meson.build
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
-
-if is_windows
- build = false
- reason = 'not supported on Windows'
- subdir_done()
-endif
-
-sources = files('rte_flow_classify.c', 'rte_flow_classify_parse.c')
-headers = files('rte_flow_classify.h')
-deps += ['net', 'table']
diff --git a/lib/flow_classify/rte_flow_classify.c b/lib/flow_classify/rte_flow_classify.c
deleted file mode 100644
index 60ca319f24e4..000000000000
--- a/lib/flow_classify/rte_flow_classify.c
+++ /dev/null
@@ -1,670 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
- */
-
-#include <rte_string_fns.h>
-#include <rte_flow_classify.h>
-#include "rte_flow_classify_parse.h"
-#include <rte_table_acl.h>
-
-static uint32_t unique_id = 1;
-
-enum rte_flow_classify_table_type table_type
- = RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE;
-
-struct rte_flow_classify_table_entry {
- /* meta-data for classify rule */
- uint32_t rule_id;
-
- /* Flow action */
- struct classify_action action;
-};
-
-struct rte_cls_table {
- /* Input parameters */
- struct rte_table_ops ops;
- uint32_t entry_size;
- enum rte_flow_classify_table_type type;
-
- /* Handle to the low-level table object */
- void *h_table;
-};
-
-#define RTE_FLOW_CLASSIFIER_MAX_NAME_SZ 256
-
-struct rte_flow_classifier {
- /* Input parameters */
- char name[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ];
- int socket_id;
-
- /* Internal */
- /* ntuple_filter */
- struct rte_eth_ntuple_filter ntuple_filter;
-
- /* classifier tables */
- struct rte_cls_table tables[RTE_FLOW_CLASSIFY_TABLE_MAX];
- uint32_t table_mask;
- uint32_t num_tables;
-
- uint16_t nb_pkts;
- struct rte_flow_classify_table_entry
- *entries[RTE_PORT_IN_BURST_SIZE_MAX];
-} __rte_cache_aligned;
-
-enum {
- PROTO_FIELD_IPV4,
- SRC_FIELD_IPV4,
- DST_FIELD_IPV4,
- SRCP_FIELD_IPV4,
- DSTP_FIELD_IPV4,
- NUM_FIELDS_IPV4
-};
-
-struct acl_keys {
- struct rte_table_acl_rule_add_params key_add; /* add key */
- struct rte_table_acl_rule_delete_params key_del; /* delete key */
-};
-
-struct classify_rules {
- enum rte_flow_classify_rule_type type;
- union {
- struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
- } u;
-};
-
-struct rte_flow_classify_rule {
- uint32_t id; /* unique ID of classify rule */
- enum rte_flow_classify_table_type tbl_type; /* rule table */
- struct classify_rules rules; /* union of rules */
- union {
- struct acl_keys key;
- } u;
- int key_found; /* rule key found in table */
- struct rte_flow_classify_table_entry entry; /* rule meta data */
- void *entry_ptr; /* handle to the table entry for rule meta data */
-};
-
-int
-rte_flow_classify_validate(
- struct rte_flow_classifier *cls,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- struct rte_flow_item *items;
- parse_filter_t parse_filter;
- uint32_t item_num = 0;
- uint32_t i = 0;
- int ret;
-
- if (error == NULL)
- return -EINVAL;
-
- if (cls == NULL) {
- RTE_FLOW_CLASSIFY_LOG(ERR,
- "%s: rte_flow_classifier parameter is NULL\n",
- __func__);
- return -EINVAL;
- }
-
- if (!attr) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR,
- NULL, "NULL attribute.");
- return -EINVAL;
- }
-
- if (!pattern) {
- rte_flow_error_set(error,
- EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
- NULL, "NULL pattern.");
- return -EINVAL;
- }
-
- if (!actions) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_NUM,
- NULL, "NULL action.");
- return -EINVAL;
- }
-
- memset(&cls->ntuple_filter, 0, sizeof(cls->ntuple_filter));
-
- /* Get the non-void item number of pattern */
- while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
- if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
- item_num++;
- i++;
- }
- item_num++;
-
- items = malloc(item_num * sizeof(struct rte_flow_item));
- if (!items) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_ITEM_NUM,
- NULL, "No memory for pattern items.");
- return -ENOMEM;
- }
-
- memset(items, 0, item_num * sizeof(struct rte_flow_item));
- classify_pattern_skip_void_item(items, pattern);
-
- parse_filter = classify_find_parse_filter_func(items);
- if (!parse_filter) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- pattern, "Unsupported pattern");
- free(items);
- return -EINVAL;
- }
-
- ret = parse_filter(attr, items, actions, &cls->ntuple_filter, error);
- free(items);
- return ret;
-}
-
-
-#define uint32_t_to_char(ip, a, b, c, d) do {\
- *a = (unsigned char)(ip >> 24 & 0xff);\
- *b = (unsigned char)(ip >> 16 & 0xff);\
- *c = (unsigned char)(ip >> 8 & 0xff);\
- *d = (unsigned char)(ip & 0xff);\
- } while (0)
-
-static inline void
-print_acl_ipv4_key_add(struct rte_table_acl_rule_add_params *key)
-{
- unsigned char a, b, c, d;
-
- printf("%s: 0x%02hhx/0x%hhx ", __func__,
- key->field_value[PROTO_FIELD_IPV4].value.u8,
- key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
-
- uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
- &a, &b, &c, &d);
- printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
- key->field_value[SRC_FIELD_IPV4].mask_range.u32);
-
- uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
- &a, &b, &c, &d);
- printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
- key->field_value[DST_FIELD_IPV4].mask_range.u32);
-
- printf("%hu : 0x%x %hu : 0x%x",
- key->field_value[SRCP_FIELD_IPV4].value.u16,
- key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
- key->field_value[DSTP_FIELD_IPV4].value.u16,
- key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
-
- printf(" priority: 0x%x\n", key->priority);
-}
-
-static inline void
-print_acl_ipv4_key_delete(struct rte_table_acl_rule_delete_params *key)
-{
- unsigned char a, b, c, d;
-
- printf("%s: 0x%02hhx/0x%hhx ", __func__,
- key->field_value[PROTO_FIELD_IPV4].value.u8,
- key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
-
- uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
- &a, &b, &c, &d);
- printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
- key->field_value[SRC_FIELD_IPV4].mask_range.u32);
-
- uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
- &a, &b, &c, &d);
- printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
- key->field_value[DST_FIELD_IPV4].mask_range.u32);
-
- printf("%hu : 0x%x %hu : 0x%x\n",
- key->field_value[SRCP_FIELD_IPV4].value.u16,
- key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
- key->field_value[DSTP_FIELD_IPV4].value.u16,
- key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
-}
-
-static int
-rte_flow_classifier_check_params(struct rte_flow_classifier_params *params)
-{
- if (params == NULL) {
- RTE_FLOW_CLASSIFY_LOG(ERR,
- "%s: Incorrect value for parameter params\n", __func__);
- return -EINVAL;
- }
-
- /* name */
- if (params->name == NULL) {
- RTE_FLOW_CLASSIFY_LOG(ERR,
- "%s: Incorrect value for parameter name\n", __func__);
- return -EINVAL;
- }
-
- /* socket */
- if (params->socket_id < 0) {
- RTE_FLOW_CLASSIFY_LOG(ERR,
- "%s: Incorrect value for parameter socket_id\n",
- __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
-struct rte_flow_classifier *
-rte_flow_classifier_create(struct rte_flow_classifier_params *params)
-{
- struct rte_flow_classifier *cls;
- int ret;
-
- RTE_FLOW_CLASSIFY_LOG(WARNING,
- "WARNING: flow_classify is deprecated and will be removed in DPDK 23.11\n");
-
- /* Check input parameters */
- ret = rte_flow_classifier_check_params(params);
- if (ret != 0) {
- RTE_FLOW_CLASSIFY_LOG(ERR,
- "%s: flow classifier params check failed (%d)\n",
- __func__, ret);
- return NULL;
- }
-
- /* Allocate memory for the flow classifier */
- cls = rte_zmalloc_socket("FLOW_CLASSIFIER",
- sizeof(struct rte_flow_classifier),
- RTE_CACHE_LINE_SIZE, params->socket_id);
-
- if (cls == NULL) {
- RTE_FLOW_CLASSIFY_LOG(ERR,
- "%s: flow classifier memory allocation failed\n",
- __func__);
- return NULL;
- }
-
- /* Save input parameters */
- strlcpy(cls->name, params->name, RTE_FLOW_CLASSIFIER_MAX_NAME_SZ);
-
- cls->socket_id = params->socket_id;
-
- return cls;
-}
-
-static void
-rte_flow_classify_table_free(struct rte_cls_table *table)
-{
- if (table->ops.f_free != NULL)
- table->ops.f_free(table->h_table);
-}
-
-int
-rte_flow_classifier_free(struct rte_flow_classifier *cls)
-{
- uint32_t i;
-
- /* Check input parameters */
- if (cls == NULL) {
- RTE_FLOW_CLASSIFY_LOG(ERR,
- "%s: rte_flow_classifier parameter is NULL\n",
- __func__);
- return -EINVAL;
- }
-
- /* Free tables */
- for (i = 0; i < cls->num_tables; i++) {
- struct rte_cls_table *table = &cls->tables[i];
-
- rte_flow_classify_table_free(table);
- }
-
- /* Free flow classifier memory */
- rte_free(cls);
-
- return 0;
-}
-
-static int
-rte_table_check_params(struct rte_flow_classifier *cls,
- struct rte_flow_classify_table_params *params)
-{
- if (cls == NULL) {
- RTE_FLOW_CLASSIFY_LOG(ERR,
- "%s: flow classifier parameter is NULL\n",
- __func__);
- return -EINVAL;
- }
- if (params == NULL) {
- RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params parameter is NULL\n",
- __func__);
- return -EINVAL;
- }
-
- /* ops */
- if (params->ops == NULL) {
- RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params->ops is NULL\n",
- __func__);
- return -EINVAL;
- }
-
- if (params->ops->f_create == NULL) {
- RTE_FLOW_CLASSIFY_LOG(ERR,
- "%s: f_create function pointer is NULL\n", __func__);
- return -EINVAL;
- }
-
- if (params->ops->f_lookup == NULL) {
- RTE_FLOW_CLASSIFY_LOG(ERR,
- "%s: f_lookup function pointer is NULL\n", __func__);
- return -EINVAL;
- }
-
- /* De we have room for one more table? */
- if (cls->num_tables == RTE_FLOW_CLASSIFY_TABLE_MAX) {
- RTE_FLOW_CLASSIFY_LOG(ERR,
- "%s: Incorrect value for num_tables parameter\n",
- __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int
-rte_flow_classify_table_create(struct rte_flow_classifier *cls,
- struct rte_flow_classify_table_params *params)
-{
- struct rte_cls_table *table;
- void *h_table;
- uint32_t entry_size;
- int ret;
-
- /* Check input arguments */
- ret = rte_table_check_params(cls, params);
- if (ret != 0)
- return ret;
-
- /* calculate table entry size */
- entry_size = sizeof(struct rte_flow_classify_table_entry);
-
- /* Create the table */
- h_table = params->ops->f_create(params->arg_create, cls->socket_id,
- entry_size);
- if (h_table == NULL) {
- RTE_FLOW_CLASSIFY_LOG(ERR, "%s: Table creation failed\n",
- __func__);
- return -EINVAL;
- }
-
- /* Commit current table to the classifier */
- table = &cls->tables[cls->num_tables];
- table->type = params->type;
- cls->num_tables++;
-
- /* Save input parameters */
- memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
-
- /* Initialize table internal data structure */
- table->entry_size = entry_size;
- table->h_table = h_table;
-
- return 0;
-}
-
-static struct rte_flow_classify_rule *
-allocate_acl_ipv4_5tuple_rule(struct rte_flow_classifier *cls)
-{
- struct rte_flow_classify_rule *rule;
-
- rule = malloc(sizeof(struct rte_flow_classify_rule));
- if (!rule)
- return rule;
-
- memset(rule, 0, sizeof(struct rte_flow_classify_rule));
- rule->id = unique_id++;
- rule->rules.type = RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE;
-
- /* key add values */
- rule->u.key.key_add.priority = cls->ntuple_filter.priority;
- rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].mask_range.u8 =
- cls->ntuple_filter.proto_mask;
- rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].value.u8 =
- cls->ntuple_filter.proto;
- rule->rules.u.ipv4_5tuple.proto = cls->ntuple_filter.proto;
- rule->rules.u.ipv4_5tuple.proto_mask = cls->ntuple_filter.proto_mask;
-
- rule->u.key.key_add.field_value[SRC_FIELD_IPV4].mask_range.u32 =
- cls->ntuple_filter.src_ip_mask;
- rule->u.key.key_add.field_value[SRC_FIELD_IPV4].value.u32 =
- cls->ntuple_filter.src_ip;
- rule->rules.u.ipv4_5tuple.src_ip_mask = cls->ntuple_filter.src_ip_mask;
- rule->rules.u.ipv4_5tuple.src_ip = cls->ntuple_filter.src_ip;
-
- rule->u.key.key_add.field_value[DST_FIELD_IPV4].mask_range.u32 =
- cls->ntuple_filter.dst_ip_mask;
- rule->u.key.key_add.field_value[DST_FIELD_IPV4].value.u32 =
- cls->ntuple_filter.dst_ip;
- rule->rules.u.ipv4_5tuple.dst_ip_mask = cls->ntuple_filter.dst_ip_mask;
- rule->rules.u.ipv4_5tuple.dst_ip = cls->ntuple_filter.dst_ip;
-
- rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
- cls->ntuple_filter.src_port_mask;
- rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].value.u16 =
- cls->ntuple_filter.src_port;
- rule->rules.u.ipv4_5tuple.src_port_mask =
- cls->ntuple_filter.src_port_mask;
- rule->rules.u.ipv4_5tuple.src_port = cls->ntuple_filter.src_port;
-
- rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
- cls->ntuple_filter.dst_port_mask;
- rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].value.u16 =
- cls->ntuple_filter.dst_port;
- rule->rules.u.ipv4_5tuple.dst_port_mask =
- cls->ntuple_filter.dst_port_mask;
- rule->rules.u.ipv4_5tuple.dst_port = cls->ntuple_filter.dst_port;
-
- if (rte_log_can_log(librte_flow_classify_logtype, RTE_LOG_DEBUG))
- print_acl_ipv4_key_add(&rule->u.key.key_add);
-
- /* key delete values */
- memcpy(&rule->u.key.key_del.field_value[PROTO_FIELD_IPV4],
- &rule->u.key.key_add.field_value[PROTO_FIELD_IPV4],
- NUM_FIELDS_IPV4 * sizeof(struct rte_acl_field));
-
- if (rte_log_can_log(librte_flow_classify_logtype, RTE_LOG_DEBUG))
- print_acl_ipv4_key_delete(&rule->u.key.key_del);
-
- return rule;
-}
-
-struct rte_flow_classify_rule *
-rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- int *key_found,
- struct rte_flow_error *error)
-{
- struct rte_flow_classify_rule *rule;
- struct rte_flow_classify_table_entry *table_entry;
- struct classify_action *action;
- uint32_t i;
- int ret;
-
- if (!error)
- return NULL;
-
- if (key_found == NULL) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "NULL key_found.");
- return NULL;
- }
-
- /* parse attr, pattern and actions */
- ret = rte_flow_classify_validate(cls, attr, pattern, actions, error);
- if (ret < 0)
- return NULL;
-
- switch (table_type) {
- case RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE:
- rule = allocate_acl_ipv4_5tuple_rule(cls);
- if (!rule)
- return NULL;
- rule->tbl_type = table_type;
- cls->table_mask |= table_type;
- break;
- default:
- return NULL;
- }
-
- action = classify_get_flow_action();
- table_entry = &rule->entry;
- table_entry->rule_id = rule->id;
- table_entry->action.action_mask = action->action_mask;
-
- /* Copy actions */
- if (action->action_mask & (1LLU << RTE_FLOW_ACTION_TYPE_COUNT)) {
- memcpy(&table_entry->action.act.counter, &action->act.counter,
- sizeof(table_entry->action.act.counter));
- }
- if (action->action_mask & (1LLU << RTE_FLOW_ACTION_TYPE_MARK)) {
- memcpy(&table_entry->action.act.mark, &action->act.mark,
- sizeof(table_entry->action.act.mark));
- }
-
- for (i = 0; i < cls->num_tables; i++) {
- struct rte_cls_table *table = &cls->tables[i];
-
- if (table->type == table_type) {
- if (table->ops.f_add != NULL) {
- ret = table->ops.f_add(
- table->h_table,
- &rule->u.key.key_add,
- &rule->entry,
- &rule->key_found,
- &rule->entry_ptr);
- if (ret) {
- free(rule);
- return NULL;
- }
-
- *key_found = rule->key_found;
- }
-
- return rule;
- }
- }
- free(rule);
- return NULL;
-}
-
-int
-rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
- struct rte_flow_classify_rule *rule)
-{
- uint32_t i;
- int ret = -EINVAL;
-
- if (!cls || !rule)
- return ret;
- enum rte_flow_classify_table_type tbl_type = rule->tbl_type;
-
- for (i = 0; i < cls->num_tables; i++) {
- struct rte_cls_table *table = &cls->tables[i];
-
- if (table->type == tbl_type) {
- if (table->ops.f_delete != NULL) {
- ret = table->ops.f_delete(table->h_table,
- &rule->u.key.key_del,
- &rule->key_found,
- &rule->entry);
- if (ret == 0)
- free(rule);
- return ret;
- }
- }
- }
- return ret;
-}
-
-static int
-flow_classifier_lookup(struct rte_flow_classifier *cls,
- struct rte_cls_table *table,
- struct rte_mbuf **pkts,
- const uint16_t nb_pkts)
-{
- int ret = -EINVAL;
- uint64_t pkts_mask;
- uint64_t lookup_hit_mask;
-
- pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
- ret = table->ops.f_lookup(table->h_table,
- pkts, pkts_mask, &lookup_hit_mask,
- (void **)cls->entries);
-
- if (!ret && lookup_hit_mask)
- cls->nb_pkts = nb_pkts;
- else
- cls->nb_pkts = 0;
-
- return ret;
-}
-
-static int
-action_apply(struct rte_flow_classifier *cls,
- struct rte_flow_classify_rule *rule,
- struct rte_flow_classify_stats *stats)
-{
- struct rte_flow_classify_ipv4_5tuple_stats *ntuple_stats;
- struct rte_flow_classify_table_entry *entry = &rule->entry;
- uint64_t count = 0;
- uint32_t action_mask = entry->action.action_mask;
- int i, ret = -EINVAL;
-
- if (action_mask & (1LLU << RTE_FLOW_ACTION_TYPE_COUNT)) {
- for (i = 0; i < cls->nb_pkts; i++) {
- if (rule->id == cls->entries[i]->rule_id)
- count++;
- }
- if (count) {
- ret = 0;
- ntuple_stats = stats->stats;
- ntuple_stats->counter1 = count;
- ntuple_stats->ipv4_5tuple = rule->rules.u.ipv4_5tuple;
- }
- }
- return ret;
-}
-
-int
-rte_flow_classifier_query(struct rte_flow_classifier *cls,
- struct rte_mbuf **pkts,
- const uint16_t nb_pkts,
- struct rte_flow_classify_rule *rule,
- struct rte_flow_classify_stats *stats)
-{
- enum rte_flow_classify_table_type tbl_type;
- uint32_t i;
- int ret = -EINVAL;
-
- if (!cls || !rule || !stats || !pkts || nb_pkts == 0)
- return ret;
-
- tbl_type = rule->tbl_type;
- for (i = 0; i < cls->num_tables; i++) {
- struct rte_cls_table *table = &cls->tables[i];
-
- if (table->type == tbl_type) {
- ret = flow_classifier_lookup(cls, table,
- pkts, nb_pkts);
- if (!ret) {
- ret = action_apply(cls, rule, stats);
- return ret;
- }
- }
- }
- return ret;
-}
-
-RTE_LOG_REGISTER_DEFAULT(librte_flow_classify_logtype, INFO);
diff --git a/lib/flow_classify/rte_flow_classify.h b/lib/flow_classify/rte_flow_classify.h
deleted file mode 100644
index 39512b620667..000000000000
--- a/lib/flow_classify/rte_flow_classify.h
+++ /dev/null
@@ -1,284 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
- */
-
-#ifndef _RTE_FLOW_CLASSIFY_H_
-#define _RTE_FLOW_CLASSIFY_H_
-
-/**
- * @file
- *
- * RTE Flow Classify Library.
- *
- * @warning
- * @b EXPERIMENTAL:
- * All functions in this file may be changed or removed without prior notice.
- *
- * This library provides flow record information with some measured properties.
- *
- * Application should define the flow and measurement criteria (action) for it.
- *
- * The Library doesn't maintain any flow records itself, instead flow
- * information is returned to upper layer only for given packets.
- *
- * It is application's responsibility to call rte_flow_classifier_query()
- * for a burst of packets, just after receiving them or before transmitting
- * them.
- * Application should provide the flow type interested in, measurement to apply
- * to that flow in rte_flow_classify_table_entry_add() API, and should provide
- * the rte_flow_classifier object and storage to put results in for the
- * rte_flow_classifier_query() API.
- *
- * Usage:
- * - application calls rte_flow_classifier_create() to create an
- * rte_flow_classifier object.
- * - application calls rte_flow_classify_table_create() to create a table
- * in the rte_flow_classifier object.
- * - application calls rte_flow_classify_table_entry_add() to add a rule to
- * the table in the rte_flow_classifier object.
- * - application calls rte_flow_classifier_query() in a polling manner,
- * preferably after rte_eth_rx_burst(). This will cause the library to
- * match packet information to flow information with some measurements.
- * - rte_flow_classifier object can be destroyed when it is no longer needed
- * with rte_flow_classifier_free()
- */
-
-#include <rte_compat.h>
-#include <rte_common.h>
-#include <rte_flow.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-extern int librte_flow_classify_logtype;
-
-#define RTE_FLOW_CLASSIFY_LOG(level, ...) \
- rte_log(RTE_LOG_ ## level, \
- librte_flow_classify_logtype, \
- RTE_FMT("%s(): " RTE_FMT_HEAD(__VA_ARGS__,), \
- __func__, \
- RTE_FMT_TAIL(__VA_ARGS__,)))
-
-#ifndef RTE_FLOW_CLASSIFY_TABLE_MAX
-#define RTE_FLOW_CLASSIFY_TABLE_MAX 32
-#endif
-
-/** Opaque data type for flow classifier */
-struct rte_flow_classifier;
-
-/** Opaque data type for flow classify rule */
-struct rte_flow_classify_rule;
-
-/** Flow classify rule type */
-enum rte_flow_classify_rule_type {
- /** no type */
- RTE_FLOW_CLASSIFY_RULE_TYPE_NONE,
- /** IPv4 5tuple type */
- RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE,
-};
-
-/** Flow classify table type */
-enum rte_flow_classify_table_type {
- /** No type */
- RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE = 1 << 0,
- /** ACL IP4 5TUPLE */
- RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE = 1 << 1,
- /** ACL VLAN IP4 5TUPLE */
- RTE_FLOW_CLASSIFY_TABLE_ACL_VLAN_IP4_5TUPLE = 1 << 2,
- /** ACL QinQ IP4 5TUPLE */
- RTE_FLOW_CLASSIFY_TABLE_ACL_QINQ_IP4_5TUPLE = 1 << 3,
-
-};
-
-/** Parameters for flow classifier creation */
-struct rte_flow_classifier_params {
- /** flow classifier name */
- const char *name;
-
- /** CPU socket ID where memory for the flow classifier and its */
- /** elements (tables) should be allocated */
- int socket_id;
-};
-
-/** Parameters for table creation */
-struct rte_flow_classify_table_params {
- /** Table operations (specific to each table type) */
- struct rte_table_ops *ops;
-
- /** Opaque param to be passed to the table create operation */
- void *arg_create;
-
- /** Classifier table type */
- enum rte_flow_classify_table_type type;
-};
-
-/** IPv4 5-tuple data */
-struct rte_flow_classify_ipv4_5tuple {
- uint32_t dst_ip; /**< Destination IP address in big endian. */
- uint32_t dst_ip_mask; /**< Mask of destination IP address. */
- uint32_t src_ip; /**< Source IP address in big endian. */
- uint32_t src_ip_mask; /**< Mask of destination IP address. */
- uint16_t dst_port; /**< Destination port in big endian. */
- uint16_t dst_port_mask; /**< Mask of destination port. */
- uint16_t src_port; /**< Source Port in big endian. */
- uint16_t src_port_mask; /**< Mask of source port. */
- uint8_t proto; /**< L4 protocol. */
- uint8_t proto_mask; /**< Mask of L4 protocol. */
-};
-
-/**
- * Flow stats
- *
- * For the count action, stats can be returned by the query API.
- *
- * Storage for stats is provided by application.
- */
-struct rte_flow_classify_stats {
- void *stats;
-};
-
-struct rte_flow_classify_ipv4_5tuple_stats {
- /** count of packets that match IPv4 5tuple pattern */
- uint64_t counter1;
- /** IPv4 5tuple data */
- struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
-};
-
-/**
- * Flow classifier create
- *
- * @param params
- * Parameters for flow classifier creation
- * @return
- * Handle to flow classifier instance on success or NULL otherwise
- */
-__rte_experimental
-struct rte_flow_classifier *
-rte_flow_classifier_create(struct rte_flow_classifier_params *params);
-
-/**
- * Flow classifier free
- *
- * @param cls
- * Handle to flow classifier instance
- * @return
- * 0 on success, error code otherwise
- */
-__rte_experimental
-int
-rte_flow_classifier_free(struct rte_flow_classifier *cls);
-
-/**
- * Flow classify table create
- *
- * @param cls
- * Handle to flow classifier instance
- * @param params
- * Parameters for flow_classify table creation
- * @return
- * 0 on success, error code otherwise
- */
-__rte_experimental
-int
-rte_flow_classify_table_create(struct rte_flow_classifier *cls,
- struct rte_flow_classify_table_params *params);
-
-/**
- * Flow classify validate
- *
- * @param cls
- * Handle to flow classifier instance
- * @param[in] attr
- * Flow rule attributes
- * @param[in] pattern
- * Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- * Associated actions (list terminated by the END pattern item).
- * @param[out] error
- * Perform verbose error reporting if not NULL. Structure
- * initialised in case of error only.
- * @return
- * 0 on success, error code otherwise
- */
-__rte_experimental
-int
-rte_flow_classify_validate(struct rte_flow_classifier *cls,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error);
-
-/**
- * Add a flow classify rule to the flow_classifier table.
- *
- * @param[in] cls
- * Flow classifier handle
- * @param[in] attr
- * Flow rule attributes
- * @param[in] pattern
- * Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- * Associated actions (list terminated by the END pattern item).
- * @param[out] key_found
- * returns 1 if rule present already, 0 otherwise.
- * @param[out] error
- * Perform verbose error reporting if not NULL. Structure
- * initialised in case of error only.
- * @return
- * A valid handle in case of success, NULL otherwise.
- */
-__rte_experimental
-struct rte_flow_classify_rule *
-rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- int *key_found,
- struct rte_flow_error *error);
-
-/**
- * Delete a flow classify rule from the flow_classifier table.
- *
- * @param[in] cls
- * Flow classifier handle
- * @param[in] rule
- * Flow classify rule
- * @return
- * 0 on success, error code otherwise.
- */
-__rte_experimental
-int
-rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
- struct rte_flow_classify_rule *rule);
-
-/**
- * Query flow classifier for given rule.
- *
- * @param[in] cls
- * Flow classifier handle
- * @param[in] pkts
- * Pointer to packets to process
- * @param[in] nb_pkts
- * Number of packets to process
- * @param[in] rule
- * Flow classify rule
- * @param[in] stats
- * Flow classify stats
- *
- * @return
- * 0 on success, error code otherwise.
- */
-__rte_experimental
-int
-rte_flow_classifier_query(struct rte_flow_classifier *cls,
- struct rte_mbuf **pkts,
- const uint16_t nb_pkts,
- struct rte_flow_classify_rule *rule,
- struct rte_flow_classify_stats *stats);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_FLOW_CLASSIFY_H_ */
diff --git a/lib/flow_classify/rte_flow_classify_parse.c b/lib/flow_classify/rte_flow_classify_parse.c
deleted file mode 100644
index 345d129d35e0..000000000000
--- a/lib/flow_classify/rte_flow_classify_parse.c
+++ /dev/null
@@ -1,532 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
- */
-
-#include <rte_flow_classify.h>
-#include "rte_flow_classify_parse.h"
-
-struct classify_valid_pattern {
- enum rte_flow_item_type *items;
- parse_filter_t parse_filter;
-};
-
-static struct classify_action action;
-
-/* Pattern for IPv4 5-tuple UDP filter */
-static enum rte_flow_item_type pattern_ntuple_1[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-/* Pattern for IPv4 5-tuple TCP filter */
-static enum rte_flow_item_type pattern_ntuple_2[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_TCP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-/* Pattern for IPv4 5-tuple SCTP filter */
-static enum rte_flow_item_type pattern_ntuple_3[] = {
- RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_SCTP,
- RTE_FLOW_ITEM_TYPE_END,
-};
-
-static int
-classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_eth_ntuple_filter *filter,
- struct rte_flow_error *error);
-
-static struct classify_valid_pattern classify_supported_patterns[] = {
- /* ntuple */
- { pattern_ntuple_1, classify_parse_ntuple_filter },
- { pattern_ntuple_2, classify_parse_ntuple_filter },
- { pattern_ntuple_3, classify_parse_ntuple_filter },
-};
-
-struct classify_action *
-classify_get_flow_action(void)
-{
- return &action;
-}
-
-/* Find the first VOID or non-VOID item pointer */
-const struct rte_flow_item *
-classify_find_first_item(const struct rte_flow_item *item, bool is_void)
-{
- bool is_find;
-
- while (item->type != RTE_FLOW_ITEM_TYPE_END) {
- if (is_void)
- is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
- else
- is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
- if (is_find)
- break;
- item++;
- }
- return item;
-}
-
-/* Skip all VOID items of the pattern */
-void
-classify_pattern_skip_void_item(struct rte_flow_item *items,
- const struct rte_flow_item *pattern)
-{
- uint32_t cpy_count = 0;
- const struct rte_flow_item *pb = pattern, *pe = pattern;
-
- for (;;) {
- /* Find a non-void item first */
- pb = classify_find_first_item(pb, false);
- if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
- pe = pb;
- break;
- }
-
- /* Find a void item */
- pe = classify_find_first_item(pb + 1, true);
-
- cpy_count = pe - pb;
- rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
-
- items += cpy_count;
-
- if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
- pb = pe;
- break;
- }
- }
- /* Copy the END item. */
- rte_memcpy(items, pe, sizeof(struct rte_flow_item));
-}
-
-/* Check if the pattern matches a supported item type array */
-static bool
-classify_match_pattern(enum rte_flow_item_type *item_array,
- struct rte_flow_item *pattern)
-{
- struct rte_flow_item *item = pattern;
-
- while ((*item_array == item->type) &&
- (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
- item_array++;
- item++;
- }
-
- return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
- item->type == RTE_FLOW_ITEM_TYPE_END);
-}
-
-/* Find if there's parse filter function matched */
-parse_filter_t
-classify_find_parse_filter_func(struct rte_flow_item *pattern)
-{
- parse_filter_t parse_filter = NULL;
- uint8_t i = 0;
-
- for (; i < RTE_DIM(classify_supported_patterns); i++) {
- if (classify_match_pattern(classify_supported_patterns[i].items,
- pattern)) {
- parse_filter =
- classify_supported_patterns[i].parse_filter;
- break;
- }
- }
-
- return parse_filter;
-}
-
-#define FLOW_RULE_MIN_PRIORITY 8
-#define FLOW_RULE_MAX_PRIORITY 0
-
-#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
- do {\
- item = pattern + index;\
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
- index++;\
- item = pattern + index;\
- } \
- } while (0)
-
-#define NEXT_ITEM_OF_ACTION(act, actions, index)\
- do {\
- act = actions + index;\
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
- index++;\
- act = actions + index;\
- } \
- } while (0)
-
-/**
- * Please aware there's an assumption for all the parsers.
- * rte_flow_item is using big endian, rte_flow_attr and
- * rte_flow_action are using CPU order.
- * Because the pattern is used to describe the packets,
- * normally the packets should use network order.
- */
-
-/**
- * Parse the rule to see if it is a n-tuple rule.
- * And get the n-tuple filter info BTW.
- * pattern:
- * The first not void item can be ETH or IPV4.
- * The second not void item must be IPV4 if the first one is ETH.
- * The third not void item must be UDP or TCP.
- * The next not void item must be END.
- * action:
- * The first not void action should be QUEUE.
- * The next not void action should be END.
- * pattern example:
- * ITEM Spec Mask
- * ETH NULL NULL
- * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
- * dst_addr 192.167.3.50 0xFFFFFFFF
- * next_proto_id 17 0xFF
- * UDP/TCP/ src_port 80 0xFFFF
- * SCTP dst_port 80 0xFFFF
- * END
- * other members in mask and spec should set to 0x00.
- * item->last should be NULL.
- */
-static int
-classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_eth_ntuple_filter *filter,
- struct rte_flow_error *error)
-{
- const struct rte_flow_item *item;
- const struct rte_flow_action *act;
- const struct rte_flow_item_ipv4 *ipv4_spec;
- const struct rte_flow_item_ipv4 *ipv4_mask;
- const struct rte_flow_item_tcp *tcp_spec;
- const struct rte_flow_item_tcp *tcp_mask;
- const struct rte_flow_item_udp *udp_spec;
- const struct rte_flow_item_udp *udp_mask;
- const struct rte_flow_item_sctp *sctp_spec;
- const struct rte_flow_item_sctp *sctp_mask;
- const struct rte_flow_action_count *count;
- const struct rte_flow_action_mark *mark_spec;
- uint32_t index;
-
- /* parse pattern */
- index = 0;
-
- /* the first not void item can be MAC or IPv4 */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
-
- if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
- item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by ntuple filter");
- return -EINVAL;
- }
- /* Skip Ethernet */
- if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
- /*Not supported last point for range*/
- if (item->last) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- item,
- "Not supported last point for range");
- return -EINVAL;
-
- }
- /* if the first item is MAC, the content should be NULL */
- if (item->spec || item->mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Not supported by ntuple filter");
- return -EINVAL;
- }
- /* check if the next not void item is IPv4 */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
- if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Not supported by ntuple filter");
- return -EINVAL;
- }
- }
-
- /* get the IPv4 info */
- if (!item->spec || !item->mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Invalid ntuple mask");
- return -EINVAL;
- }
- /*Not supported last point for range*/
- if (item->last) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- item, "Not supported last point for range");
- return -EINVAL;
-
- }
-
- ipv4_mask = item->mask;
- /**
- * Only support src & dst addresses, protocol,
- * others should be masked.
- */
- if (ipv4_mask->hdr.version_ihl ||
- ipv4_mask->hdr.type_of_service ||
- ipv4_mask->hdr.total_length ||
- ipv4_mask->hdr.packet_id ||
- ipv4_mask->hdr.fragment_offset ||
- ipv4_mask->hdr.time_to_live ||
- ipv4_mask->hdr.hdr_checksum) {
- rte_flow_error_set(error,
- EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by ntuple filter");
- return -EINVAL;
- }
-
- filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
- filter->src_ip_mask = ipv4_mask->hdr.src_addr;
- filter->proto_mask = ipv4_mask->hdr.next_proto_id;
-
- ipv4_spec = item->spec;
- filter->dst_ip = ipv4_spec->hdr.dst_addr;
- filter->src_ip = ipv4_spec->hdr.src_addr;
- filter->proto = ipv4_spec->hdr.next_proto_id;
-
- /* check if the next not void item is TCP or UDP or SCTP */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
- if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
- item->type != RTE_FLOW_ITEM_TYPE_UDP &&
- item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by ntuple filter");
- return -EINVAL;
- }
-
- /* get the TCP/UDP info */
- if (!item->spec || !item->mask) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Invalid ntuple mask");
- return -EINVAL;
- }
-
- /*Not supported last point for range*/
- if (item->last) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- item, "Not supported last point for range");
- return -EINVAL;
-
- }
-
- if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
- tcp_mask = item->mask;
-
- /**
- * Only support src & dst ports, tcp flags,
- * others should be masked.
- */
- if (tcp_mask->hdr.sent_seq ||
- tcp_mask->hdr.recv_ack ||
- tcp_mask->hdr.data_off ||
- tcp_mask->hdr.rx_win ||
- tcp_mask->hdr.cksum ||
- tcp_mask->hdr.tcp_urp) {
- memset(filter, 0,
- sizeof(struct rte_eth_ntuple_filter));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by ntuple filter");
- return -EINVAL;
- }
-
- filter->dst_port_mask = tcp_mask->hdr.dst_port;
- filter->src_port_mask = tcp_mask->hdr.src_port;
- if (tcp_mask->hdr.tcp_flags == 0xFF) {
- filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
- } else if (!tcp_mask->hdr.tcp_flags) {
- filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
- } else {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by ntuple filter");
- return -EINVAL;
- }
-
- tcp_spec = item->spec;
- filter->dst_port = tcp_spec->hdr.dst_port;
- filter->src_port = tcp_spec->hdr.src_port;
- filter->tcp_flags = tcp_spec->hdr.tcp_flags;
- } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
- udp_mask = item->mask;
-
- /**
- * Only support src & dst ports,
- * others should be masked.
- */
- if (udp_mask->hdr.dgram_len ||
- udp_mask->hdr.dgram_cksum) {
- memset(filter, 0,
- sizeof(struct rte_eth_ntuple_filter));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by ntuple filter");
- return -EINVAL;
- }
-
- filter->dst_port_mask = udp_mask->hdr.dst_port;
- filter->src_port_mask = udp_mask->hdr.src_port;
-
- udp_spec = item->spec;
- filter->dst_port = udp_spec->hdr.dst_port;
- filter->src_port = udp_spec->hdr.src_port;
- } else {
- sctp_mask = item->mask;
-
- /**
- * Only support src & dst ports,
- * others should be masked.
- */
- if (sctp_mask->hdr.tag ||
- sctp_mask->hdr.cksum) {
- memset(filter, 0,
- sizeof(struct rte_eth_ntuple_filter));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by ntuple filter");
- return -EINVAL;
- }
-
- filter->dst_port_mask = sctp_mask->hdr.dst_port;
- filter->src_port_mask = sctp_mask->hdr.src_port;
-
- sctp_spec = item->spec;
- filter->dst_port = sctp_spec->hdr.dst_port;
- filter->src_port = sctp_spec->hdr.src_port;
- }
-
- /* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
- if (item->type != RTE_FLOW_ITEM_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by ntuple filter");
- return -EINVAL;
- }
-
- table_type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;
-
- /* parse attr */
- /* must be input direction */
- if (!attr->ingress) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- attr, "Only support ingress.");
- return -EINVAL;
- }
-
- /* not supported */
- if (attr->egress) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
- attr, "Not support egress.");
- return -EINVAL;
- }
-
- if (attr->priority > 0xFFFF) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
- attr, "Error priority.");
- return -EINVAL;
- }
- filter->priority = (uint16_t)attr->priority;
- if (attr->priority > FLOW_RULE_MIN_PRIORITY)
- filter->priority = FLOW_RULE_MAX_PRIORITY;
-
- /* parse action */
- index = 0;
-
- /**
- * n-tuple only supports count and Mark,
- * check if the first not void action is COUNT or MARK.
- */
- memset(&action, 0, sizeof(action));
- NEXT_ITEM_OF_ACTION(act, actions, index);
- switch (act->type) {
- case RTE_FLOW_ACTION_TYPE_COUNT:
- action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
- count = act->conf;
- memcpy(&action.act.counter, count, sizeof(action.act.counter));
- break;
- case RTE_FLOW_ACTION_TYPE_MARK:
- action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
- mark_spec = act->conf;
- memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
- break;
- default:
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, act,
- "Invalid action.");
- return -EINVAL;
- }
-
- /* check if the next not void item is MARK or COUNT or END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
- switch (act->type) {
- case RTE_FLOW_ACTION_TYPE_COUNT:
- action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
- count = act->conf;
- memcpy(&action.act.counter, count, sizeof(action.act.counter));
- break;
- case RTE_FLOW_ACTION_TYPE_MARK:
- action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
- mark_spec = act->conf;
- memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
- break;
- case RTE_FLOW_ACTION_TYPE_END:
- return 0;
- default:
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, act,
- "Invalid action.");
- return -EINVAL;
- }
-
- /* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
- if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, act,
- "Invalid action.");
- return -EINVAL;
- }
-
- return 0;
-}
diff --git a/lib/flow_classify/rte_flow_classify_parse.h b/lib/flow_classify/rte_flow_classify_parse.h
deleted file mode 100644
index 7943efc0d4ba..000000000000
--- a/lib/flow_classify/rte_flow_classify_parse.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
- */
-
-#ifndef _RTE_FLOW_CLASSIFY_PARSE_H_
-#define _RTE_FLOW_CLASSIFY_PARSE_H_
-
-#include <rte_ethdev.h>
-#include <rte_flow.h>
-#include <stdbool.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-extern enum rte_flow_classify_table_type table_type;
-
-struct classify_action {
- /* Flow action mask */
- uint64_t action_mask;
-
- struct action {
- /** Integer value to return with packets */
- struct rte_flow_action_mark mark;
- /** Flow rule counter */
- struct rte_flow_query_count counter;
- } act;
-};
-
-typedef int (*parse_filter_t)(const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_eth_ntuple_filter *filter,
- struct rte_flow_error *error);
-
-/* Skip all VOID items of the pattern */
-void
-classify_pattern_skip_void_item(struct rte_flow_item *items,
- const struct rte_flow_item *pattern);
-
-/* Find the first VOID or non-VOID item pointer */
-const struct rte_flow_item *
-classify_find_first_item(const struct rte_flow_item *item, bool is_void);
-
-
-/* Find if there's parse filter function matched */
-parse_filter_t
-classify_find_parse_filter_func(struct rte_flow_item *pattern);
-
-/* get action data */
-struct classify_action *
-classify_get_flow_action(void);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_FLOW_CLASSIFY_PARSE_H_ */
diff --git a/lib/flow_classify/version.map b/lib/flow_classify/version.map
deleted file mode 100644
index 49bc25c6a087..000000000000
--- a/lib/flow_classify/version.map
+++ /dev/null
@@ -1,13 +0,0 @@
-EXPERIMENTAL {
- global:
-
- rte_flow_classifier_create;
- rte_flow_classifier_free;
- rte_flow_classifier_query;
- rte_flow_classify_table_create;
- rte_flow_classify_table_entry_add;
- rte_flow_classify_table_entry_delete;
- rte_flow_classify_validate;
-
- local: *;
-};
diff --git a/lib/meson.build b/lib/meson.build
index fac2f52cad4f..ecac701161c8 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -62,7 +62,6 @@ libraries = [
'pdump', # pdump lib depends on bpf
'table',
'pipeline',
- 'flow_classify', # flow_classify lib depends on pkt framework table lib
'graph',
'node',
]
@@ -70,7 +69,6 @@ libraries = [
optional_libs = [
'bitratestats',
'cfgfile',
- 'flow_classify',
'gpudev',
'graph',
'gro',
@@ -89,7 +87,6 @@ optional_libs = [
]
dpdk_libs_deprecated += [
- 'flow_classify',
'kni',
]
diff --git a/meson_options.txt b/meson_options.txt
index 82c8297065f0..95e22e0ce70c 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -10,7 +10,7 @@ option('disable_apps', type: 'string', value: '', description:
'Comma-separated list of apps to explicitly disable.')
option('disable_drivers', type: 'string', value: '', description:
'Comma-separated list of drivers to explicitly disable.')
-option('disable_libs', type: 'string', value: 'flow_classify,kni', description:
+option('disable_libs', type: 'string', value: 'kni', description:
'Comma-separated list of libraries to explicitly disable. [NOTE: not all libs can be disabled]')
option('drivers_install_subdir', type: 'string', value: 'dpdk/pmds-<VERSION>', description:
'Subdirectory of libdir where to install PMDs. Defaults to using a versioned subdirectory.')
--
2.39.2
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH v2 2/2] kni: remove deprecated kernel network interface
2023-08-01 16:04 [PATCH v2 0/2] Remove disabled functionality Stephen Hemminger
2023-08-01 16:04 ` [PATCH v2 1/2] flow_classify: remove library Stephen Hemminger
@ 2023-08-01 16:05 ` Stephen Hemminger
2023-08-04 13:19 ` David Marchand
2023-08-04 13:02 ` [PATCH v2 0/2] Remove disabled functionality David Marchand
2 siblings, 1 reply; 9+ messages in thread
From: Stephen Hemminger @ 2023-08-01 16:05 UTC (permalink / raw)
To: dev
Cc: Stephen Hemminger, Thomas Monjalon, Maxime Coquelin, Chenbo Xia,
Anatoly Burakov, Cristian Dumitrescu, Nithin Dabilpuram,
Kiran Kumar K, Sunil Kumar Kori, Satha Rao, Bruce Richardson
The KNI driver had design flaws such as calling userspace with kernel
mutex held that made it prone to deadlock. The design also introduced
security risks because the kernel driver trusted the userspace
(DPDK) KNI interface. The kernel driver was never reviewed by
the upstream kernel community and would never have been accepted.
And since the Linux kernel API is not stable, it was a continual
source of maintenance issues especially with distribution kernels.
There are better ways to inject packets into the kernel such as
virtio_user, tap and XDP drivers. All of these do not need out of
tree kernel drivers.
The deprecation was announced in the 22.11 release, and users were
directed to alternatives there.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
MAINTAINERS | 10 -
app/test/meson.build | 2 -
app/test/test_kni.c | 740 ---------------
doc/api/doxy-api-index.md | 2 -
doc/api/doxy-api.conf.in | 1 -
doc/guides/contributing/documentation.rst | 4 +-
doc/guides/howto/flow_bifurcation.rst | 3 +-
doc/guides/nics/index.rst | 1 -
doc/guides/nics/kni.rst | 170 ----
doc/guides/nics/virtio.rst | 92 +-
.../prog_guide/env_abstraction_layer.rst | 2 -
doc/guides/prog_guide/glossary.rst | 3 -
doc/guides/prog_guide/index.rst | 1 -
.../prog_guide/kernel_nic_interface.rst | 423 ---------
doc/guides/prog_guide/packet_framework.rst | 9 +-
doc/guides/rel_notes/deprecation.rst | 9 +-
doc/guides/rel_notes/release_23_11.rst | 2 +
doc/guides/sample_app_ug/ip_pipeline.rst | 22 -
drivers/net/cnxk/cnxk_ethdev.c | 2 +-
drivers/net/kni/meson.build | 11 -
drivers/net/kni/rte_eth_kni.c | 524 -----------
drivers/net/meson.build | 1 -
examples/ip_pipeline/Makefile | 1 -
examples/ip_pipeline/cli.c | 95 --
examples/ip_pipeline/examples/kni.cli | 69 --
examples/ip_pipeline/kni.c | 168 ----
examples/ip_pipeline/kni.h | 46 -
examples/ip_pipeline/main.c | 10 -
examples/ip_pipeline/meson.build | 1 -
examples/ip_pipeline/pipeline.c | 57 --
examples/ip_pipeline/pipeline.h | 2 -
kernel/linux/kni/Kbuild | 6 -
kernel/linux/kni/compat.h | 157 ----
kernel/linux/kni/kni_dev.h | 137 ---
kernel/linux/kni/kni_fifo.h | 87 --
kernel/linux/kni/kni_misc.c | 719 --------------
kernel/linux/kni/kni_net.c | 878 ------------------
kernel/linux/kni/meson.build | 41 -
kernel/linux/meson.build | 2 +-
lib/eal/common/eal_common_log.c | 1 -
lib/eal/include/rte_log.h | 2 +-
lib/eal/linux/eal.c | 19 -
lib/kni/meson.build | 21 -
lib/kni/rte_kni.c | 843 -----------------
lib/kni/rte_kni.h | 269 ------
lib/kni/rte_kni_common.h | 147 ---
lib/kni/rte_kni_fifo.h | 117 ---
lib/kni/version.map | 24 -
lib/meson.build | 6 -
lib/port/meson.build | 6 -
lib/port/rte_port_kni.c | 515 ----------
lib/port/rte_port_kni.h | 63 --
lib/port/version.map | 3 -
meson_options.txt | 2 +-
54 files changed, 14 insertions(+), 6534 deletions(-)
delete mode 100644 app/test/test_kni.c
delete mode 100644 doc/guides/nics/kni.rst
delete mode 100644 doc/guides/prog_guide/kernel_nic_interface.rst
delete mode 100644 drivers/net/kni/meson.build
delete mode 100644 drivers/net/kni/rte_eth_kni.c
delete mode 100644 examples/ip_pipeline/examples/kni.cli
delete mode 100644 examples/ip_pipeline/kni.c
delete mode 100644 examples/ip_pipeline/kni.h
delete mode 100644 kernel/linux/kni/Kbuild
delete mode 100644 kernel/linux/kni/compat.h
delete mode 100644 kernel/linux/kni/kni_dev.h
delete mode 100644 kernel/linux/kni/kni_fifo.h
delete mode 100644 kernel/linux/kni/kni_misc.c
delete mode 100644 kernel/linux/kni/kni_net.c
delete mode 100644 kernel/linux/kni/meson.build
delete mode 100644 lib/kni/meson.build
delete mode 100644 lib/kni/rte_kni.c
delete mode 100644 lib/kni/rte_kni.h
delete mode 100644 lib/kni/rte_kni_common.h
delete mode 100644 lib/kni/rte_kni_fifo.h
delete mode 100644 lib/kni/version.map
delete mode 100644 lib/port/rte_port_kni.c
delete mode 100644 lib/port/rte_port_kni.h
diff --git a/MAINTAINERS b/MAINTAINERS
index dbb25211c367..6345e7f8a65d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -617,12 +617,6 @@ F: doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst
F: app/test/test_link_bonding*
F: examples/bond/
-Linux KNI
-F: kernel/linux/kni/
-F: lib/kni/
-F: doc/guides/prog_guide/kernel_nic_interface.rst
-F: app/test/test_kni.c
-
Linux AF_PACKET
M: John W. Linville <linville@tuxdriver.com>
F: drivers/net/af_packet/
@@ -1027,10 +1021,6 @@ F: drivers/net/tap/
F: doc/guides/nics/tap.rst
F: doc/guides/nics/features/tap.ini
-KNI PMD
-F: drivers/net/kni/
-F: doc/guides/nics/kni.rst
-
Ring PMD
M: Bruce Richardson <bruce.richardson@intel.com>
F: drivers/net/ring/
diff --git a/app/test/meson.build b/app/test/meson.build
index 90a2e350c7ae..66897c14a399 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -72,7 +72,6 @@ test_sources = files(
'test_ipsec.c',
'test_ipsec_sad.c',
'test_ipsec_perf.c',
- 'test_kni.c',
'test_kvargs.c',
'test_lcores.c',
'test_logs.c',
@@ -237,7 +236,6 @@ fast_tests = [
['fbarray_autotest', true, true],
['hash_readwrite_func_autotest', false, true],
['ipsec_autotest', true, true],
- ['kni_autotest', false, true],
['kvargs_autotest', true, true],
['member_autotest', true, true],
['power_cpufreq_autotest', false, true],
diff --git a/app/test/test_kni.c b/app/test/test_kni.c
deleted file mode 100644
index 4039da0b080c..000000000000
--- a/app/test/test_kni.c
+++ /dev/null
@@ -1,740 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#include "test.h"
-
-#include <stdio.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <string.h>
-#if !defined(RTE_EXEC_ENV_LINUX) || !defined(RTE_LIB_KNI)
-
-static int
-test_kni(void)
-{
- printf("KNI not supported, skipping test\n");
- return TEST_SKIPPED;
-}
-
-#else
-
-#include <sys/wait.h>
-#include <dirent.h>
-
-#include <rte_string_fns.h>
-#include <rte_mempool.h>
-#include <rte_ethdev.h>
-#include <rte_cycles.h>
-#include <rte_kni.h>
-
-#define NB_MBUF 8192
-#define MAX_PACKET_SZ 2048
-#define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)
-#define PKT_BURST_SZ 32
-#define MEMPOOL_CACHE_SZ PKT_BURST_SZ
-#define SOCKET 0
-#define NB_RXD 1024
-#define NB_TXD 1024
-#define KNI_TIMEOUT_MS 5000 /* ms */
-
-#define IFCONFIG "/sbin/ifconfig "
-#define TEST_KNI_PORT "test_kni_port"
-#define KNI_MODULE_PATH "/sys/module/rte_kni"
-#define KNI_MODULE_PARAM_LO KNI_MODULE_PATH"/parameters/lo_mode"
-#define KNI_TEST_MAX_PORTS 4
-/* The threshold number of mbufs to be transmitted or received. */
-#define KNI_NUM_MBUF_THRESHOLD 100
-static int kni_pkt_mtu = 0;
-
-struct test_kni_stats {
- volatile uint64_t ingress;
- volatile uint64_t egress;
-};
-
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = 8,
- .hthresh = 8,
- .wthresh = 4,
- },
- .rx_free_thresh = 0,
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = 36,
- .hthresh = 0,
- .wthresh = 0,
- },
- .tx_free_thresh = 0,
- .tx_rs_thresh = 0,
-};
-
-static const struct rte_eth_conf port_conf = {
- .txmode = {
- .mq_mode = RTE_ETH_MQ_TX_NONE,
- },
-};
-
-static struct rte_kni_ops kni_ops = {
- .change_mtu = NULL,
- .config_network_if = NULL,
- .config_mac_address = NULL,
- .config_promiscusity = NULL,
-};
-
-static unsigned int lcore_main, lcore_ingress, lcore_egress;
-static struct rte_kni *test_kni_ctx;
-static struct test_kni_stats stats;
-
-static volatile uint32_t test_kni_processing_flag;
-
-static struct rte_mempool *
-test_kni_create_mempool(void)
-{
- struct rte_mempool * mp;
-
- mp = rte_mempool_lookup("kni_mempool");
- if (!mp)
- mp = rte_pktmbuf_pool_create("kni_mempool",
- NB_MBUF,
- MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ,
- SOCKET);
-
- return mp;
-}
-
-static struct rte_mempool *
-test_kni_lookup_mempool(void)
-{
- return rte_mempool_lookup("kni_mempool");
-}
-/* Callback for request of changing MTU */
-static int
-kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
-{
- printf("Change MTU of port %d to %u\n", port_id, new_mtu);
- kni_pkt_mtu = new_mtu;
- printf("Change MTU of port %d to %i successfully.\n",
- port_id, kni_pkt_mtu);
- return 0;
-}
-
-static int
-test_kni_link_change(void)
-{
- int ret;
- int pid;
-
- pid = fork();
- if (pid < 0) {
- printf("Error: Failed to fork a process\n");
- return -1;
- }
-
- if (pid == 0) {
- printf("Starting KNI Link status change tests.\n");
- if (system(IFCONFIG TEST_KNI_PORT" up") == -1) {
- ret = -1;
- goto error;
- }
-
- ret = rte_kni_update_link(test_kni_ctx, 1);
- if (ret < 0) {
- printf("Failed to change link state to Up ret=%d.\n",
- ret);
- goto error;
- }
- rte_delay_ms(1000);
- printf("KNI: Set LINKUP, previous state=%d\n", ret);
-
- ret = rte_kni_update_link(test_kni_ctx, 0);
- if (ret != 1) {
- printf(
- "Failed! Previous link state should be 1, returned %d.\n",
- ret);
- goto error;
- }
- rte_delay_ms(1000);
- printf("KNI: Set LINKDOWN, previous state=%d\n", ret);
-
- ret = rte_kni_update_link(test_kni_ctx, 1);
- if (ret != 0) {
- printf(
- "Failed! Previous link state should be 0, returned %d.\n",
- ret);
- goto error;
- }
- printf("KNI: Set LINKUP, previous state=%d\n", ret);
-
- ret = 0;
- rte_delay_ms(1000);
-
-error:
- if (system(IFCONFIG TEST_KNI_PORT" down") == -1)
- ret = -1;
-
- printf("KNI: Link status change tests: %s.\n",
- (ret == 0) ? "Passed" : "Failed");
- exit(ret);
- } else {
- int p_ret, status;
-
- while (1) {
- p_ret = waitpid(pid, &status, WNOHANG);
- if (p_ret != 0) {
- if (WIFEXITED(status))
- return WEXITSTATUS(status);
- return -1;
- }
- rte_delay_ms(10);
- rte_kni_handle_request(test_kni_ctx);
- }
- }
-}
-/**
- * This loop fully tests the basic functions of KNI. e.g. transmitting,
- * receiving to, from kernel space, and kernel requests.
- *
- * This is the loop to transmit/receive mbufs to/from kernel interface with
- * supported by KNI kernel module. The ingress lcore will allocate mbufs and
- * transmit them to kernel space; while the egress lcore will receive the mbufs
- * from kernel space and free them.
- * On the main lcore, several commands will be run to check handling the
- * kernel requests. And it will finally set the flag to exit the KNI
- * transmitting/receiving to/from the kernel space.
- *
- * Note: To support this testing, the KNI kernel module needs to be insmodded
- * in one of its loopback modes.
- */
-static int
-test_kni_loop(__rte_unused void *arg)
-{
- int ret = 0;
- unsigned nb_rx, nb_tx, num, i;
- const unsigned lcore_id = rte_lcore_id();
- struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
-
- if (lcore_id == lcore_main) {
- rte_delay_ms(KNI_TIMEOUT_MS);
- /* tests of handling kernel request */
- if (system(IFCONFIG TEST_KNI_PORT" up") == -1)
- ret = -1;
- if (system(IFCONFIG TEST_KNI_PORT" mtu 1400") == -1)
- ret = -1;
- if (system(IFCONFIG TEST_KNI_PORT" down") == -1)
- ret = -1;
- rte_delay_ms(KNI_TIMEOUT_MS);
- test_kni_processing_flag = 1;
- } else if (lcore_id == lcore_ingress) {
- struct rte_mempool *mp = test_kni_lookup_mempool();
-
- if (mp == NULL)
- return -1;
-
- while (1) {
- if (test_kni_processing_flag)
- break;
-
- for (nb_rx = 0; nb_rx < PKT_BURST_SZ; nb_rx++) {
- pkts_burst[nb_rx] = rte_pktmbuf_alloc(mp);
- if (!pkts_burst[nb_rx])
- break;
- }
-
- num = rte_kni_tx_burst(test_kni_ctx, pkts_burst,
- nb_rx);
- stats.ingress += num;
- rte_kni_handle_request(test_kni_ctx);
- if (num < nb_rx) {
- for (i = num; i < nb_rx; i++) {
- rte_pktmbuf_free(pkts_burst[i]);
- }
- }
- rte_delay_ms(10);
- }
- } else if (lcore_id == lcore_egress) {
- while (1) {
- if (test_kni_processing_flag)
- break;
- num = rte_kni_rx_burst(test_kni_ctx, pkts_burst,
- PKT_BURST_SZ);
- stats.egress += num;
- for (nb_tx = 0; nb_tx < num; nb_tx++)
- rte_pktmbuf_free(pkts_burst[nb_tx]);
- rte_delay_ms(10);
- }
- }
-
- return ret;
-}
-
-static int
-test_kni_allocate_lcores(void)
-{
- unsigned i, count = 0;
-
- lcore_main = rte_get_main_lcore();
- printf("main lcore: %u\n", lcore_main);
- for (i = 0; i < RTE_MAX_LCORE; i++) {
- if (count >=2 )
- break;
- if (rte_lcore_is_enabled(i) && i != lcore_main) {
- count ++;
- if (count == 1)
- lcore_ingress = i;
- else if (count == 2)
- lcore_egress = i;
- }
- }
- printf("count: %u\n", count);
-
- return count == 2 ? 0 : -1;
-}
-
-static int
-test_kni_register_handler_mp(void)
-{
-#define TEST_KNI_HANDLE_REQ_COUNT 10 /* 5s */
-#define TEST_KNI_HANDLE_REQ_INTERVAL 500 /* ms */
-#define TEST_KNI_MTU 1450
-#define TEST_KNI_MTU_STR " 1450"
- int pid;
-
- pid = fork();
- if (pid < 0) {
- printf("Failed to fork a process\n");
- return -1;
- } else if (pid == 0) {
- int i;
- struct rte_kni *kni = rte_kni_get(TEST_KNI_PORT);
- struct rte_kni_ops ops = {
- .change_mtu = kni_change_mtu,
- .config_network_if = NULL,
- .config_mac_address = NULL,
- .config_promiscusity = NULL,
- };
-
- if (!kni) {
- printf("Failed to get KNI named %s\n", TEST_KNI_PORT);
- exit(-1);
- }
-
- kni_pkt_mtu = 0;
-
- /* Check with the invalid parameters */
- if (rte_kni_register_handlers(kni, NULL) == 0) {
- printf("Unexpectedly register successfully "
- "with NULL ops pointer\n");
- exit(-1);
- }
- if (rte_kni_register_handlers(NULL, &ops) == 0) {
- printf("Unexpectedly register successfully "
- "to NULL KNI device pointer\n");
- exit(-1);
- }
-
- if (rte_kni_register_handlers(kni, &ops)) {
- printf("Fail to register ops\n");
- exit(-1);
- }
-
- /* Check registering again after it has been registered */
- if (rte_kni_register_handlers(kni, &ops) == 0) {
- printf("Unexpectedly register successfully after "
- "it has already been registered\n");
- exit(-1);
- }
-
- /**
- * Handle the request of setting MTU,
- * with registered handlers.
- */
- for (i = 0; i < TEST_KNI_HANDLE_REQ_COUNT; i++) {
- rte_kni_handle_request(kni);
- if (kni_pkt_mtu == TEST_KNI_MTU)
- break;
- rte_delay_ms(TEST_KNI_HANDLE_REQ_INTERVAL);
- }
- if (i >= TEST_KNI_HANDLE_REQ_COUNT) {
- printf("MTU has not been set\n");
- exit(-1);
- }
-
- kni_pkt_mtu = 0;
- if (rte_kni_unregister_handlers(kni) < 0) {
- printf("Fail to unregister ops\n");
- exit(-1);
- }
-
- /* Check with invalid parameter */
- if (rte_kni_unregister_handlers(NULL) == 0) {
- exit(-1);
- }
-
- /**
- * Handle the request of setting MTU,
- * without registered handlers.
- */
- for (i = 0; i < TEST_KNI_HANDLE_REQ_COUNT; i++) {
- rte_kni_handle_request(kni);
- if (kni_pkt_mtu != 0)
- break;
- rte_delay_ms(TEST_KNI_HANDLE_REQ_INTERVAL);
- }
- if (kni_pkt_mtu != 0) {
- printf("MTU shouldn't be set\n");
- exit(-1);
- }
-
- exit(0);
- } else {
- int p_ret, status;
-
- rte_delay_ms(1000);
- if (system(IFCONFIG TEST_KNI_PORT " mtu" TEST_KNI_MTU_STR)
- == -1)
- return -1;
-
- rte_delay_ms(1000);
- if (system(IFCONFIG TEST_KNI_PORT " mtu" TEST_KNI_MTU_STR)
- == -1)
- return -1;
-
- p_ret = wait(&status);
- if (!WIFEXITED(status)) {
- printf("Child process (%d) exit abnormally\n", p_ret);
- return -1;
- }
- if (WEXITSTATUS(status) != 0) {
- printf("Child process exit with failure\n");
- return -1;
- }
- }
-
- return 0;
-}
-
-static int
-test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
-{
- int ret = 0;
- unsigned i;
- struct rte_kni *kni;
- struct rte_kni_conf conf;
- struct rte_eth_dev_info info;
- struct rte_kni_ops ops;
-
- if (!mp)
- return -1;
-
- memset(&conf, 0, sizeof(conf));
- memset(&info, 0, sizeof(info));
- memset(&ops, 0, sizeof(ops));
-
- ret = rte_eth_dev_info_get(port_id, &info);
- if (ret != 0) {
- printf("Error during getting device (port %u) info: %s\n",
- port_id, strerror(-ret));
- return -1;
- }
-
- snprintf(conf.name, sizeof(conf.name), TEST_KNI_PORT);
-
- /* core id 1 configured for kernel thread */
- conf.core_id = 1;
- conf.force_bind = 1;
- conf.mbuf_size = MAX_PACKET_SZ;
- conf.group_id = port_id;
-
- ops = kni_ops;
- ops.port_id = port_id;
-
- /* basic test of kni processing */
- kni = rte_kni_alloc(mp, &conf, &ops);
- if (!kni) {
- printf("fail to create kni\n");
- return -1;
- }
-
- test_kni_ctx = kni;
- test_kni_processing_flag = 0;
- stats.ingress = 0;
- stats.egress = 0;
-
- /**
- * Check multiple processes support on
- * registering/unregistering handlers.
- */
- if (test_kni_register_handler_mp() < 0) {
- printf("fail to check multiple process support\n");
- ret = -1;
- goto fail_kni;
- }
-
- ret = test_kni_link_change();
- if (ret != 0)
- goto fail_kni;
-
- rte_eal_mp_remote_launch(test_kni_loop, NULL, CALL_MAIN);
- RTE_LCORE_FOREACH_WORKER(i) {
- if (rte_eal_wait_lcore(i) < 0) {
- ret = -1;
- goto fail_kni;
- }
- }
- /**
- * Check if the number of mbufs received from kernel space is equal
- * to that of transmitted to kernel space
- */
- if (stats.ingress < KNI_NUM_MBUF_THRESHOLD ||
- stats.egress < KNI_NUM_MBUF_THRESHOLD) {
- printf("The ingress/egress number should not be "
- "less than %u\n", (unsigned)KNI_NUM_MBUF_THRESHOLD);
- ret = -1;
- goto fail_kni;
- }
-
- if (rte_kni_release(kni) < 0) {
- printf("fail to release kni\n");
- return -1;
- }
- test_kni_ctx = NULL;
-
- /* test of reusing memzone */
- kni = rte_kni_alloc(mp, &conf, &ops);
- if (!kni) {
- printf("fail to create kni\n");
- return -1;
- }
-
- /* Release the kni for following testing */
- if (rte_kni_release(kni) < 0) {
- printf("fail to release kni\n");
- return -1;
- }
-
- return ret;
-fail_kni:
- if (rte_kni_release(kni) < 0) {
- printf("fail to release kni\n");
- ret = -1;
- }
-
- return ret;
-}
-
-static int
-test_kni(void)
-{
- int ret = -1;
- uint16_t port_id;
- struct rte_kni *kni;
- struct rte_mempool *mp;
- struct rte_kni_conf conf;
- struct rte_eth_dev_info info;
- struct rte_kni_ops ops;
- FILE *fd;
- DIR *dir;
- char buf[16];
-
- dir = opendir(KNI_MODULE_PATH);
- if (!dir) {
- if (errno == ENOENT) {
- printf("Cannot run UT due to missing rte_kni module\n");
- return TEST_SKIPPED;
- }
- printf("opendir: %s", strerror(errno));
- return -1;
- }
- closedir(dir);
-
- /* Initialize KNI subsystem */
- ret = rte_kni_init(KNI_TEST_MAX_PORTS);
- if (ret < 0) {
- printf("fail to initialize KNI subsystem\n");
- return -1;
- }
-
- if (test_kni_allocate_lcores() < 0) {
- printf("No enough lcores for kni processing\n");
- return -1;
- }
-
- mp = test_kni_create_mempool();
- if (!mp) {
- printf("fail to create mempool for kni\n");
- return -1;
- }
-
- /* configuring port 0 for the test is enough */
- port_id = 0;
- ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
- if (ret < 0) {
- printf("fail to configure port %d\n", port_id);
- return -1;
- }
-
- ret = rte_eth_rx_queue_setup(port_id, 0, NB_RXD, SOCKET, &rx_conf, mp);
- if (ret < 0) {
- printf("fail to setup rx queue for port %d\n", port_id);
- return -1;
- }
-
- ret = rte_eth_tx_queue_setup(port_id, 0, NB_TXD, SOCKET, &tx_conf);
- if (ret < 0) {
- printf("fail to setup tx queue for port %d\n", port_id);
- return -1;
- }
-
- ret = rte_eth_dev_start(port_id);
- if (ret < 0) {
- printf("fail to start port %d\n", port_id);
- return -1;
- }
- ret = rte_eth_promiscuous_enable(port_id);
- if (ret != 0) {
- printf("fail to enable promiscuous mode for port %d: %s\n",
- port_id, rte_strerror(-ret));
- return -1;
- }
-
- /* basic test of kni processing */
- fd = fopen(KNI_MODULE_PARAM_LO, "r");
- if (fd == NULL) {
- printf("fopen: %s", strerror(errno));
- return -1;
- }
- memset(&buf, 0, sizeof(buf));
- if (fgets(buf, sizeof(buf), fd)) {
- if (!strncmp(buf, "lo_mode_fifo", strlen("lo_mode_fifo")) ||
- !strncmp(buf, "lo_mode_fifo_skb",
- strlen("lo_mode_fifo_skb"))) {
- ret = test_kni_processing(port_id, mp);
- if (ret < 0) {
- fclose(fd);
- goto fail;
- }
- } else
- printf("test_kni_processing skipped because of missing rte_kni module lo_mode argument\n");
- }
- fclose(fd);
-
- /* test of allocating KNI with NULL mempool pointer */
- memset(&info, 0, sizeof(info));
- memset(&conf, 0, sizeof(conf));
- memset(&ops, 0, sizeof(ops));
-
- ret = rte_eth_dev_info_get(port_id, &info);
- if (ret != 0) {
- printf("Error during getting device (port %u) info: %s\n",
- port_id, strerror(-ret));
- return -1;
- }
-
- conf.group_id = port_id;
- conf.mbuf_size = MAX_PACKET_SZ;
-
- ops = kni_ops;
- ops.port_id = port_id;
- kni = rte_kni_alloc(NULL, &conf, &ops);
- if (kni) {
- ret = -1;
- printf("unexpectedly creates kni successfully with NULL "
- "mempool pointer\n");
- goto fail;
- }
-
- /* test of allocating KNI without configurations */
- kni = rte_kni_alloc(mp, NULL, NULL);
- if (kni) {
- ret = -1;
- printf("Unexpectedly allocate KNI device successfully "
- "without configurations\n");
- goto fail;
- }
-
- /* test of allocating KNI without a name */
- memset(&conf, 0, sizeof(conf));
- memset(&info, 0, sizeof(info));
- memset(&ops, 0, sizeof(ops));
-
- ret = rte_eth_dev_info_get(port_id, &info);
- if (ret != 0) {
- printf("Error during getting device (port %u) info: %s\n",
- port_id, strerror(-ret));
- ret = -1;
- goto fail;
- }
-
- conf.group_id = port_id;
- conf.mbuf_size = MAX_PACKET_SZ;
-
- ops = kni_ops;
- ops.port_id = port_id;
- kni = rte_kni_alloc(mp, &conf, &ops);
- if (kni) {
- ret = -1;
- printf("Unexpectedly allocate a KNI device successfully "
- "without a name\n");
- goto fail;
- }
-
- /* test of releasing NULL kni context */
- ret = rte_kni_release(NULL);
- if (ret == 0) {
- ret = -1;
- printf("unexpectedly release kni successfully\n");
- goto fail;
- }
-
- /* test of handling request on NULL device pointer */
- ret = rte_kni_handle_request(NULL);
- if (ret == 0) {
- ret = -1;
- printf("Unexpectedly handle request on NULL device pointer\n");
- goto fail;
- }
-
- /* test of getting KNI device with pointer to NULL */
- kni = rte_kni_get(NULL);
- if (kni) {
- ret = -1;
- printf("Unexpectedly get a KNI device with "
- "NULL name pointer\n");
- goto fail;
- }
-
- /* test of getting KNI device with an zero length name string */
- memset(&conf, 0, sizeof(conf));
- kni = rte_kni_get(conf.name);
- if (kni) {
- ret = -1;
- printf("Unexpectedly get a KNI device with "
- "zero length name string\n");
- goto fail;
- }
-
- /* test of getting KNI device with an invalid string name */
- memset(&conf, 0, sizeof(conf));
- snprintf(conf.name, sizeof(conf.name), "testing");
- kni = rte_kni_get(conf.name);
- if (kni) {
- ret = -1;
- printf("Unexpectedly get a KNI device with "
- "a never used name string\n");
- goto fail;
- }
- ret = 0;
-
-fail:
- if (rte_eth_dev_stop(port_id) != 0)
- printf("Failed to stop port %u\n", port_id);
-
- return ret;
-}
-
-#endif
-
-REGISTER_TEST_COMMAND(kni_autotest, test_kni);
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 5cd8c9de8105..fdeda139329e 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -43,7 +43,6 @@ The public API headers are grouped by topics:
[bond](@ref rte_eth_bond.h),
[vhost](@ref rte_vhost.h),
[vdpa](@ref rte_vdpa.h),
- [KNI](@ref rte_kni.h),
[ixgbe](@ref rte_pmd_ixgbe.h),
[i40e](@ref rte_pmd_i40e.h),
[iavf](@ref rte_pmd_iavf.h),
@@ -177,7 +176,6 @@ The public API headers are grouped by topics:
[frag](@ref rte_port_frag.h),
[reass](@ref rte_port_ras.h),
[sched](@ref rte_port_sched.h),
- [kni](@ref rte_port_kni.h),
[src/sink](@ref rte_port_source_sink.h)
* [table](@ref rte_table.h):
[lpm IPv4](@ref rte_table_lpm.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 9a9c52e5569c..31885039c768 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -48,7 +48,6 @@ INPUT = @TOPDIR@/doc/api/doxy-api-index.md \
@TOPDIR@/lib/ip_frag \
@TOPDIR@/lib/ipsec \
@TOPDIR@/lib/jobstats \
- @TOPDIR@/lib/kni \
@TOPDIR@/lib/kvargs \
@TOPDIR@/lib/latencystats \
@TOPDIR@/lib/lpm \
diff --git a/doc/guides/contributing/documentation.rst b/doc/guides/contributing/documentation.rst
index 7fcbb7fc43b2..38e184a130ee 100644
--- a/doc/guides/contributing/documentation.rst
+++ b/doc/guides/contributing/documentation.rst
@@ -94,8 +94,8 @@ added to by the developer.
* **The Programmers Guide**
- The Programmers Guide explains how the API components of DPDK such as the EAL, Memzone, Rings and the Hash Library work.
- It also explains how some higher level functionality such as Packet Distributor, Packet Framework and KNI work.
+ The Programmers Guide explains how the API components of the DPDK such as the EAL, Memzone, Rings and the Hash Library work.
+ It also describes some of the higher level functionality, such as the Packet Distributor and Packet Framework.
It also shows the build system and explains how to add applications.
The Programmers Guide should be expanded when new functionality is added to DPDK.
diff --git a/doc/guides/howto/flow_bifurcation.rst b/doc/guides/howto/flow_bifurcation.rst
index 838eb2a4cc89..554dd24c32c5 100644
--- a/doc/guides/howto/flow_bifurcation.rst
+++ b/doc/guides/howto/flow_bifurcation.rst
@@ -7,8 +7,7 @@ Flow Bifurcation How-to Guide
Flow Bifurcation is a mechanism which uses hardware capable Ethernet devices
to split traffic between Linux user space and kernel space. Since it is a
hardware assisted feature this approach can provide line rate processing
-capability. Other than :ref:`KNI <kni>`, the software is just required to
-enable device configuration, there is no need to take care of the packet
+capability. There is no need to take care of the packet
movement during the traffic split. This can yield better performance with
less CPU overhead.
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 31296822e5ec..7bfcac880f44 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -43,7 +43,6 @@ Network Interface Controller Drivers
ionic
ipn3ke
ixgbe
- kni
mana
memif
mlx4
diff --git a/doc/guides/nics/kni.rst b/doc/guides/nics/kni.rst
deleted file mode 100644
index bd3033bb585c..000000000000
--- a/doc/guides/nics/kni.rst
+++ /dev/null
@@ -1,170 +0,0 @@
-.. SPDX-License-Identifier: BSD-3-Clause
- Copyright(c) 2017 Intel Corporation.
-
-KNI Poll Mode Driver
-======================
-
-KNI PMD is wrapper to the :ref:`librte_kni <kni>` library.
-
-This PMD enables using KNI without having a KNI specific application,
-any forwarding application can use PMD interface for KNI.
-
-Sending packets to any DPDK controlled interface or sending to the
-Linux networking stack will be transparent to the DPDK application.
-
-To create a KNI device ``net_kni#`` device name should be used, and this
-will create ``kni#`` Linux virtual network interface.
-
-There is no physical device backend for the virtual KNI device.
-
-Packets sent to the KNI Linux interface will be received by the DPDK
-application, and DPDK application may forward packets to a physical NIC
-or to a virtual device (like another KNI interface or PCAP interface).
-
-To forward any traffic from physical NIC to the Linux networking stack,
-an application should control a physical port and create one virtual KNI port,
-and forward between two.
-
-Using this PMD requires KNI kernel module be inserted.
-
-
-Usage
------
-
-EAL ``--vdev`` argument can be used to create KNI device instance, like::
-
- dpdk-testpmd --vdev=net_kni0 --vdev=net_kni1 -- -i
-
-Above command will create ``kni0`` and ``kni1`` Linux network interfaces,
-those interfaces can be controlled by standard Linux tools.
-
-When testpmd forwarding starts, any packets sent to ``kni0`` interface
-forwarded to the ``kni1`` interface and vice versa.
-
-There is no hard limit on number of interfaces that can be created.
-
-
-Default interface configuration
--------------------------------
-
-``librte_kni`` can create Linux network interfaces with different features,
-feature set controlled by a configuration struct, and KNI PMD uses a fixed
-configuration:
-
- .. code-block:: console
-
- Interface name: kni#
- force bind kernel thread to a core : NO
- mbuf size: (rte_pktmbuf_data_room_size(pktmbuf_pool) - RTE_PKTMBUF_HEADROOM)
- mtu: (conf.mbuf_size - RTE_ETHER_HDR_LEN)
-
-KNI control path is not supported with the PMD, since there is no physical
-backend device by default.
-
-
-Runtime Configuration
----------------------
-
-``no_request_thread``, by default PMD creates a pthread for each KNI interface
-to handle Linux network interface control commands, like ``ifconfig kni0 up``
-
-With ``no_request_thread`` option, pthread is not created and control commands
-not handled by PMD.
-
-By default request thread is enabled. And this argument should not be used
-most of the time, unless this PMD used with customized DPDK application to handle
-requests itself.
-
-Argument usage::
-
- dpdk-testpmd --vdev "net_kni0,no_request_thread=1" -- -i
-
-
-PMD log messages
-----------------
-
-If KNI kernel module (rte_kni.ko) not inserted, following error log printed::
-
- "KNI: KNI subsystem has not been initialized. Invoke rte_kni_init() first"
-
-
-PMD testing
------------
-
-It is possible to test PMD quickly using KNI kernel module loopback feature:
-
-* Insert KNI kernel module with loopback support:
-
- .. code-block:: console
-
- insmod <build_dir>/kernel/linux/kni/rte_kni.ko lo_mode=lo_mode_fifo_skb
-
-* Start testpmd with no physical device but two KNI virtual devices:
-
- .. code-block:: console
-
- ./dpdk-testpmd --vdev net_kni0 --vdev net_kni1 -- -i
-
- .. code-block:: console
-
- ...
- Configuring Port 0 (socket 0)
- KNI: pci: 00:00:00 c580:b8
- Port 0: 1A:4A:5B:7C:A2:8C
- Configuring Port 1 (socket 0)
- KNI: pci: 00:00:00 600:b9
- Port 1: AE:95:21:07:93:DD
- Checking link statuses...
- Port 0 Link Up - speed 10000 Mbps - full-duplex
- Port 1 Link Up - speed 10000 Mbps - full-duplex
- Done
- testpmd>
-
-* Observe Linux interfaces
-
- .. code-block:: console
-
- $ ifconfig kni0 && ifconfig kni1
- kni0: flags=4098<BROADCAST,MULTICAST> mtu 1500
- ether ae:8e:79:8e:9b:c8 txqueuelen 1000 (Ethernet)
- RX packets 0 bytes 0 (0.0 B)
- RX errors 0 dropped 0 overruns 0 frame 0
- TX packets 0 bytes 0 (0.0 B)
- TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
-
- kni1: flags=4098<BROADCAST,MULTICAST> mtu 1500
- ether 9e:76:43:53:3e:9b txqueuelen 1000 (Ethernet)
- RX packets 0 bytes 0 (0.0 B)
- RX errors 0 dropped 0 overruns 0 frame 0
- TX packets 0 bytes 0 (0.0 B)
- TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
-
-
-* Start forwarding with tx_first:
-
- .. code-block:: console
-
- testpmd> start tx_first
-
-* Quit and check forwarding stats:
-
- .. code-block:: console
-
- testpmd> quit
- Telling cores to stop...
- Waiting for lcores to finish...
-
- ---------------------- Forward statistics for port 0 ----------------------
- RX-packets: 35637905 RX-dropped: 0 RX-total: 35637905
- TX-packets: 35637947 TX-dropped: 0 TX-total: 35637947
- ----------------------------------------------------------------------------
-
- ---------------------- Forward statistics for port 1 ----------------------
- RX-packets: 35637915 RX-dropped: 0 RX-total: 35637915
- TX-packets: 35637937 TX-dropped: 0 TX-total: 35637937
- ----------------------------------------------------------------------------
-
- +++++++++++++++ Accumulated forward statistics for all ports+++++++++++++++
- RX-packets: 71275820 RX-dropped: 0 RX-total: 71275820
- TX-packets: 71275884 TX-dropped: 0 TX-total: 71275884
- ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
diff --git a/doc/guides/nics/virtio.rst b/doc/guides/nics/virtio.rst
index f5e54a5e9cfd..ba6247170dbb 100644
--- a/doc/guides/nics/virtio.rst
+++ b/doc/guides/nics/virtio.rst
@@ -10,15 +10,12 @@ we provide a virtio Poll Mode Driver (PMD) as a software solution, comparing to
for fast guest VM to guest VM communication and guest VM to host communication.
Vhost is a kernel acceleration module for virtio qemu backend.
-The DPDK extends kni to support vhost raw socket interface,
-which enables vhost to directly read/ write packets from/to a physical port.
-With this enhancement, virtio could achieve quite promising performance.
For basic qemu-KVM installation and other Intel EM poll mode driver in guest VM,
please refer to Chapter "Driver for VM Emulated Devices".
In this chapter, we will demonstrate usage of virtio PMD with two backends,
-standard qemu vhost back end and vhost kni back end.
+standard qemu vhost back end.
Virtio Implementation in DPDK
-----------------------------
@@ -89,93 +86,6 @@ The following prerequisites apply:
* When using legacy interface, ``SYS_RAWIO`` capability is required
for ``iopl()`` call to enable access to PCI I/O ports.
-Virtio with kni vhost Back End
-------------------------------
-
-This section demonstrates kni vhost back end example setup for Phy-VM Communication.
-
-.. _figure_host_vm_comms:
-
-.. figure:: img/host_vm_comms.*
-
- Host2VM Communication Example Using kni vhost Back End
-
-
-Host2VM communication example
-
-#. Load the kni kernel module:
-
- .. code-block:: console
-
- insmod rte_kni.ko
-
- Other basic DPDK preparations like hugepage enabling,
- UIO port binding are not listed here.
- Please refer to the *DPDK Getting Started Guide* for detailed instructions.
-
-#. Launch the kni user application:
-
- .. code-block:: console
-
- <build_dir>/examples/dpdk-kni -l 0-3 -n 4 -- -p 0x1 -P --config="(0,1,3)"
-
- This command generates one network device vEth0 for physical port.
- If specify more physical ports, the generated network device will be vEth1, vEth2, and so on.
-
- For each physical port, kni creates two user threads.
- One thread loops to fetch packets from the physical NIC port into the kni receive queue.
- The other user thread loops to send packets in the kni transmit queue.
-
- For each physical port, kni also creates a kernel thread that retrieves packets from the kni receive queue,
- place them onto kni's raw socket's queue and wake up the vhost kernel thread to exchange packets with the virtio virt queue.
-
- For more details about kni, please refer to :ref:`kni`.
-
-#. Enable the kni raw socket functionality for the specified physical NIC port,
- get the generated file descriptor and set it in the qemu command line parameter.
- Always remember to set ioeventfd_on and vhost_on.
-
- Example:
-
- .. code-block:: console
-
- echo 1 > /sys/class/net/vEth0/sock_en
- fd=`cat /sys/class/net/vEth0/sock_fd`
- exec qemu-system-x86_64 -enable-kvm -cpu host \
- -m 2048 -smp 4 -name dpdk-test1-vm1 \
- -drive file=/data/DPDKVMS/dpdk-vm.img \
- -netdev tap, fd=$fd,id=mynet_kni, script=no,vhost=on \
- -device virtio-net-pci,netdev=mynet_kni,bus=pci.0,addr=0x3,ioeventfd=on \
- -vnc:1 -daemonize
-
- In the above example, virtio port 0 in the guest VM will be associated with vEth0, which in turns corresponds to a physical port,
- which means received packets come from vEth0, and transmitted packets is sent to vEth0.
-
-#. In the guest, bind the virtio device to the uio_pci_generic kernel module and start the forwarding application.
- When the virtio port in guest bursts Rx, it is getting packets from the
- raw socket's receive queue.
- When the virtio port bursts Tx, it is sending packet to the tx_q.
-
- .. code-block:: console
-
- modprobe uio
- dpdk-hugepages.py --setup 1G
- modprobe uio_pci_generic
- ./usertools/dpdk-devbind.py -b uio_pci_generic 00:03.0
-
- We use testpmd as the forwarding application in this example.
-
- .. figure:: img/console.*
-
- Running testpmd
-
-#. Use IXIA packet generator to inject a packet stream into the KNI physical port.
-
- The packet reception and transmission flow path is:
-
- IXIA packet generator->82599 PF->KNI Rx queue->KNI raw socket queue->Guest
- VM virtio port 0 Rx burst->Guest VM virtio port 0 Tx burst-> KNI Tx queue
- ->82599 PF-> IXIA packet generator
Virtio with qemu virtio Back End
--------------------------------
diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst
index 93c8a031be56..5d382fdd9032 100644
--- a/doc/guides/prog_guide/env_abstraction_layer.rst
+++ b/doc/guides/prog_guide/env_abstraction_layer.rst
@@ -610,8 +610,6 @@ devices would fail anyway.
``RTE_PCI_DRV_NEED_IOVA_AS_VA`` flag is used to dictate that this PCI
driver can only work in RTE_IOVA_VA mode.
- When the KNI kernel module is detected, RTE_IOVA_PA mode is preferred as a
- performance penalty is expected in RTE_IOVA_VA mode.
IOVA Mode Configuration
~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/prog_guide/glossary.rst b/doc/guides/prog_guide/glossary.rst
index fb0910ba5b3f..8d6349701e43 100644
--- a/doc/guides/prog_guide/glossary.rst
+++ b/doc/guides/prog_guide/glossary.rst
@@ -103,9 +103,6 @@ lcore
A logical execution unit of the processor, sometimes called a *hardware
thread*.
-KNI
- Kernel Network Interface
-
L1
Layer 1
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index c04847bfa148..2c47d9d010f4 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -53,7 +53,6 @@ Programmer's Guide
pcapng_lib
pdump_lib
multi_proc_support
- kernel_nic_interface
thread_safety_dpdk_functions
eventdev
event_ethernet_rx_adapter
diff --git a/doc/guides/prog_guide/kernel_nic_interface.rst b/doc/guides/prog_guide/kernel_nic_interface.rst
deleted file mode 100644
index 392e5df75fcf..000000000000
--- a/doc/guides/prog_guide/kernel_nic_interface.rst
+++ /dev/null
@@ -1,423 +0,0 @@
-.. SPDX-License-Identifier: BSD-3-Clause
- Copyright(c) 2010-2015 Intel Corporation.
-
-.. _kni:
-
-Kernel NIC Interface
-====================
-
-.. note::
-
- KNI is deprecated and will be removed in future.
- See :doc:`../rel_notes/deprecation`.
-
- :ref:`virtio_user_as_exception_path` alternative is the preferred way
- for interfacing with the Linux network stack
- as it is an in-kernel solution and has similar performance expectations.
-
-.. note::
-
- KNI is disabled by default in the DPDK build.
- To re-enable the library, remove 'kni' from the "disable_libs" meson option when configuring a build.
-
-The DPDK Kernel NIC Interface (KNI) allows userspace applications access to the Linux* control plane.
-
-KNI provides an interface with the kernel network stack
-and allows management of DPDK ports using standard Linux net tools
-such as ``ethtool``, ``iproute2`` and ``tcpdump``.
-
-The main use case of KNI is to get/receive exception packets from/to Linux network stack
-while main datapath IO is done bypassing the networking stack.
-
-There are other alternatives to KNI, all are available in the upstream Linux:
-
-#. :ref:`virtio_user_as_exception_path`
-
-#. :doc:`../nics/tap` as wrapper to `Linux tun/tap
- <https://www.kernel.org/doc/Documentation/networking/tuntap.txt>`_
-
-The benefits of using the KNI against alternatives are:
-
-* Faster than existing Linux TUN/TAP interfaces
- (by eliminating system calls and copy_to_user()/copy_from_user() operations.
-
-The disadvantages of the KNI are:
-
-* It is out-of-tree Linux kernel module
- which makes updating and distributing the driver more difficult.
- Most users end up building the KNI driver from source
- which requires the packages and tools to build kernel modules.
-
-* As it shares memory between userspace and kernelspace,
- and kernel part directly uses input provided by userspace, it is not safe.
- This makes hard to upstream the module.
-
-* Requires dedicated kernel cores.
-
-* Only a subset of net devices control commands are supported by KNI.
-
-The components of an application using the DPDK Kernel NIC Interface are shown in :numref:`figure_kernel_nic_intf`.
-
-.. _figure_kernel_nic_intf:
-
-.. figure:: img/kernel_nic_intf.*
-
- Components of a DPDK KNI Application
-
-
-The DPDK KNI Kernel Module
---------------------------
-
-The KNI kernel loadable module ``rte_kni`` provides the kernel interface
-for DPDK applications.
-
-When the ``rte_kni`` module is loaded, it will create a device ``/dev/kni``
-that is used by the DPDK KNI API functions to control and communicate with
-the kernel module.
-
-The ``rte_kni`` kernel module contains several optional parameters which
-can be specified when the module is loaded to control its behavior:
-
-.. code-block:: console
-
- # modinfo rte_kni.ko
- <snip>
- parm: lo_mode: KNI loopback mode (default=lo_mode_none):
- lo_mode_none Kernel loopback disabled
- lo_mode_fifo Enable kernel loopback with fifo
- lo_mode_fifo_skb Enable kernel loopback with fifo and skb buffer
- (charp)
- parm: kthread_mode: Kernel thread mode (default=single):
- single Single kernel thread mode enabled.
- multiple Multiple kernel thread mode enabled.
- (charp)
- parm: carrier: Default carrier state for KNI interface (default=off):
- off Interfaces will be created with carrier state set to off.
- on Interfaces will be created with carrier state set to on.
- (charp)
- parm: enable_bifurcated: Enable request processing support for
- bifurcated drivers, which means releasing rtnl_lock before calling
- userspace callback and supporting async requests (default=off):
- on Enable request processing support for bifurcated drivers.
- (charp)
- parm: min_scheduling_interval: KNI thread min scheduling interval (default=100 microseconds)
- (long)
- parm: max_scheduling_interval: KNI thread max scheduling interval (default=200 microseconds)
- (long)
-
-
-Loading the ``rte_kni`` kernel module without any optional parameters is
-the typical way a DPDK application gets packets into and out of the kernel
-network stack. Without any parameters, only one kernel thread is created
-for all KNI devices for packet receiving in kernel side, loopback mode is
-disabled, and the default carrier state of KNI interfaces is set to *off*.
-
-.. code-block:: console
-
- # insmod <build_dir>/kernel/linux/kni/rte_kni.ko
-
-.. _kni_loopback_mode:
-
-Loopback Mode
-~~~~~~~~~~~~~
-
-For testing, the ``rte_kni`` kernel module can be loaded in loopback mode
-by specifying the ``lo_mode`` parameter:
-
-.. code-block:: console
-
- # insmod <build_dir>/kernel/linux/kni/rte_kni.ko lo_mode=lo_mode_fifo
-
-The ``lo_mode_fifo`` loopback option will loop back ring enqueue/dequeue
-operations in kernel space.
-
-.. code-block:: console
-
- # insmod <build_dir>/kernel/linux/kni/rte_kni.ko lo_mode=lo_mode_fifo_skb
-
-The ``lo_mode_fifo_skb`` loopback option will loop back ring enqueue/dequeue
-operations and sk buffer copies in kernel space.
-
-If the ``lo_mode`` parameter is not specified, loopback mode is disabled.
-
-.. _kni_kernel_thread_mode:
-
-Kernel Thread Mode
-~~~~~~~~~~~~~~~~~~
-
-To provide flexibility of performance, the ``rte_kni`` KNI kernel module
-can be loaded with the ``kthread_mode`` parameter. The ``rte_kni`` kernel
-module supports two options: "single kernel thread" mode and "multiple
-kernel thread" mode.
-
-Single kernel thread mode is enabled as follows:
-
-.. code-block:: console
-
- # insmod <build_dir>/kernel/linux/kni/rte_kni.ko kthread_mode=single
-
-This mode will create only one kernel thread for all KNI interfaces to
-receive data on the kernel side. By default, this kernel thread is not
-bound to any particular core, but the user can set the core affinity for
-this kernel thread by setting the ``core_id`` and ``force_bind`` parameters
-in ``struct rte_kni_conf`` when the first KNI interface is created:
-
-For optimum performance, the kernel thread should be bound to a core in
-on the same socket as the DPDK lcores used in the application.
-
-The KNI kernel module can also be configured to start a separate kernel
-thread for each KNI interface created by the DPDK application. Multiple
-kernel thread mode is enabled as follows:
-
-.. code-block:: console
-
- # insmod <build_dir>/kernel/linux/kni/rte_kni.ko kthread_mode=multiple
-
-This mode will create a separate kernel thread for each KNI interface to
-receive data on the kernel side. The core affinity of each ``kni_thread``
-kernel thread can be specified by setting the ``core_id`` and ``force_bind``
-parameters in ``struct rte_kni_conf`` when each KNI interface is created.
-
-Multiple kernel thread mode can provide scalable higher performance if
-sufficient unused cores are available on the host system.
-
-If the ``kthread_mode`` parameter is not specified, the "single kernel
-thread" mode is used.
-
-.. _kni_default_carrier_state:
-
-Default Carrier State
-~~~~~~~~~~~~~~~~~~~~~
-
-The default carrier state of KNI interfaces created by the ``rte_kni``
-kernel module is controlled via the ``carrier`` option when the module
-is loaded.
-
-If ``carrier=off`` is specified, the kernel module will leave the carrier
-state of the interface *down* when the interface is management enabled.
-The DPDK application can set the carrier state of the KNI interface using the
-``rte_kni_update_link()`` function. This is useful for DPDK applications
-which require that the carrier state of the KNI interface reflect the
-actual link state of the corresponding physical NIC port.
-
-If ``carrier=on`` is specified, the kernel module will automatically set
-the carrier state of the interface to *up* when the interface is management
-enabled. This is useful for DPDK applications which use the KNI interface as
-a purely virtual interface that does not correspond to any physical hardware
-and do not wish to explicitly set the carrier state of the interface with
-``rte_kni_update_link()``. It is also useful for testing in loopback mode
-where the NIC port may not be physically connected to anything.
-
-To set the default carrier state to *on*:
-
-.. code-block:: console
-
- # insmod <build_dir>/kernel/linux/kni/rte_kni.ko carrier=on
-
-To set the default carrier state to *off*:
-
-.. code-block:: console
-
- # insmod <build_dir>/kernel/linux/kni/rte_kni.ko carrier=off
-
-If the ``carrier`` parameter is not specified, the default carrier state
-of KNI interfaces will be set to *off*.
-
-.. _kni_bifurcated_device_support:
-
-Bifurcated Device Support
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-User callbacks are executed while kernel module holds the ``rtnl`` lock, this
-causes a deadlock when callbacks run control commands on another Linux kernel
-network interface.
-
-Bifurcated devices has kernel network driver part and to prevent deadlock for
-them ``enable_bifurcated`` is used.
-
-To enable bifurcated device support:
-
-.. code-block:: console
-
- # insmod <build_dir>/kernel/linux/kni/rte_kni.ko enable_bifurcated=on
-
-Enabling bifurcated device support releases ``rtnl`` lock before calling
-callback and locks it back after callback. Also enables asynchronous request to
-support callbacks that requires rtnl lock to work (interface down).
-
-KNI Kthread Scheduling
-~~~~~~~~~~~~~~~~~~~~~~
-
-The ``min_scheduling_interval`` and ``max_scheduling_interval`` parameters
-control the rescheduling interval of the KNI kthreads.
-
-This might be useful if we have use cases in which we require improved
-latency or performance for control plane traffic.
-
-The implementation is backed by Linux High Precision Timers, and uses ``usleep_range``.
-Hence, it will have the same granularity constraints as this Linux subsystem.
-
-For Linux High Precision Timers, you can check the following resource: `Kernel Timers <http://www.kernel.org/doc/Documentation/timers/timers-howto.txt>`_
-
-To set the ``min_scheduling_interval`` to a value of 100 microseconds:
-
-.. code-block:: console
-
- # insmod <build_dir>/kernel/linux/kni/rte_kni.ko min_scheduling_interval=100
-
-To set the ``max_scheduling_interval`` to a value of 200 microseconds:
-
-.. code-block:: console
-
- # insmod <build_dir>/kernel/linux/kni/rte_kni.ko max_scheduling_interval=200
-
-If the ``min_scheduling_interval`` and ``max_scheduling_interval`` parameters are
-not specified, the default interval limits will be set to *100* and *200* respectively.
-
-KNI Creation and Deletion
--------------------------
-
-Before any KNI interfaces can be created, the ``rte_kni`` kernel module must
-be loaded into the kernel and configured with the ``rte_kni_init()`` function.
-
-The KNI interfaces are created by a DPDK application dynamically via the
-``rte_kni_alloc()`` function.
-
-The ``struct rte_kni_conf`` structure contains fields which allow the
-user to specify the interface name, set the MTU size, set an explicit or
-random MAC address and control the affinity of the kernel Rx thread(s)
-(both single and multi-threaded modes).
-By default the KNI sample example gets the MTU from the matching device,
-and in case of KNI PMD it is derived from mbuf buffer length.
-
-The ``struct rte_kni_ops`` structure contains pointers to functions to
-handle requests from the ``rte_kni`` kernel module. These functions
-allow DPDK applications to perform actions when the KNI interfaces are
-manipulated by control commands or functions external to the application.
-
-For example, the DPDK application may wish to enabled/disable a physical
-NIC port when a user enabled/disables a KNI interface with ``ip link set
-[up|down] dev <ifaceX>``. The DPDK application can register a callback for
-``config_network_if`` which will be called when the interface management
-state changes.
-
-There are currently four callbacks for which the user can register
-application functions:
-
-``config_network_if``:
-
- Called when the management state of the KNI interface changes.
- For example, when the user runs ``ip link set [up|down] dev <ifaceX>``.
-
-``change_mtu``:
-
- Called when the user changes the MTU size of the KNI
- interface. For example, when the user runs ``ip link set mtu <size>
- dev <ifaceX>``.
-
-``config_mac_address``:
-
- Called when the user changes the MAC address of the KNI interface.
- For example, when the user runs ``ip link set address <MAC>
- dev <ifaceX>``. If the user sets this callback function to NULL,
- but sets the ``port_id`` field to a value other than -1, a default
- callback handler in the rte_kni library ``kni_config_mac_address()``
- will be called which calls ``rte_eth_dev_default_mac_addr_set()``
- on the specified ``port_id``.
-
-``config_promiscusity``:
-
- Called when the user changes the promiscuity state of the KNI
- interface. For example, when the user runs ``ip link set promisc
- [on|off] dev <ifaceX>``. If the user sets this callback function to
- NULL, but sets the ``port_id`` field to a value other than -1, a default
- callback handler in the rte_kni library ``kni_config_promiscusity()``
- will be called which calls ``rte_eth_promiscuous_<enable|disable>()``
- on the specified ``port_id``.
-
-``config_allmulticast``:
-
- Called when the user changes the allmulticast state of the KNI interface.
- For example, when the user runs ``ifconfig <ifaceX> [-]allmulti``. If the
- user sets this callback function to NULL, but sets the ``port_id`` field to
- a value other than -1, a default callback handler in the rte_kni library
- ``kni_config_allmulticast()`` will be called which calls
- ``rte_eth_allmulticast_<enable|disable>()`` on the specified ``port_id``.
-
-In order to run these callbacks, the application must periodically call
-the ``rte_kni_handle_request()`` function. Any user callback function
-registered will be called directly from ``rte_kni_handle_request()`` so
-care must be taken to prevent deadlock and to not block any DPDK fastpath
-tasks. Typically DPDK applications which use these callbacks will need
-to create a separate thread or secondary process to periodically call
-``rte_kni_handle_request()``.
-
-The KNI interfaces can be deleted by a DPDK application with
-``rte_kni_release()``. All KNI interfaces not explicitly deleted will be
-deleted when the ``/dev/kni`` device is closed, either explicitly with
-``rte_kni_close()`` or when the DPDK application is closed.
-
-DPDK mbuf Flow
---------------
-
-To minimize the amount of DPDK code running in kernel space, the mbuf mempool is managed in userspace only.
-The kernel module will be aware of mbufs,
-but all mbuf allocation and free operations will be handled by the DPDK application only.
-
-:numref:`figure_pkt_flow_kni` shows a typical scenario with packets sent in both directions.
-
-.. _figure_pkt_flow_kni:
-
-.. figure:: img/pkt_flow_kni.*
-
- Packet Flow via mbufs in the DPDK KNI
-
-
-Use Case: Ingress
------------------
-
-On the DPDK RX side, the mbuf is allocated by the PMD in the RX thread context.
-This thread will enqueue the mbuf in the rx_q FIFO,
-and the next pointers in mbuf-chain will convert to physical address.
-The KNI thread will poll all KNI active devices for the rx_q.
-If an mbuf is dequeued, it will be converted to a sk_buff and sent to the net stack via netif_rx().
-The dequeued mbuf must be freed, so the same pointer is sent back in the free_q FIFO,
-and next pointers must convert back to virtual address if exists before put in the free_q FIFO.
-
-The RX thread, in the same main loop, polls this FIFO and frees the mbuf after dequeuing it.
-The address conversion of the next pointer is to prevent the chained mbuf
-in different hugepage segments from causing kernel crash.
-
-Use Case: Egress
-----------------
-
-For packet egress the DPDK application must first enqueue several mbufs to create an mbuf cache on the kernel side.
-
-The packet is received from the Linux net stack, by calling the kni_net_tx() callback.
-The mbuf is dequeued (without waiting due the cache) and filled with data from sk_buff.
-The sk_buff is then freed and the mbuf sent in the tx_q FIFO.
-
-The DPDK TX thread dequeues the mbuf and sends it to the PMD via ``rte_eth_tx_burst()``.
-It then puts the mbuf back in the cache.
-
-IOVA = VA: Support
-------------------
-
-KNI operates in IOVA_VA scheme when
-
-- LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) and
-- EAL option `iova-mode=va` is passed or bus IOVA scheme in the DPDK is selected
- as RTE_IOVA_VA.
-
-Due to IOVA to KVA address translations, based on the KNI use case there
-can be a performance impact. For mitigation, forcing IOVA to PA via EAL
-"--iova-mode=pa" option can be used, IOVA_DC bus iommu scheme can also
-result in IOVA as PA.
-
-Ethtool
--------
-
-Ethtool is a Linux-specific tool with corresponding support in the kernel.
-The current version of kni provides minimal ethtool functionality
-including querying version and link state. It does not support link
-control, statistics, or dumping device registers.
diff --git a/doc/guides/prog_guide/packet_framework.rst b/doc/guides/prog_guide/packet_framework.rst
index 3d4e3b66cc5c..ebc69d8c3e75 100644
--- a/doc/guides/prog_guide/packet_framework.rst
+++ b/doc/guides/prog_guide/packet_framework.rst
@@ -87,18 +87,15 @@ Port Types
| | | management and hierarchical scheduling according to pre-defined SLAs. |
| | | |
+---+------------------+---------------------------------------------------------------------------------------+
- | 6 | KNI | Send/receive packets to/from Linux kernel space. |
- | | | |
- +---+------------------+---------------------------------------------------------------------------------------+
- | 7 | Source | Input port used as packet generator. Similar to Linux kernel /dev/zero character |
+ | 6 | Source | Input port used as packet generator. Similar to Linux kernel /dev/zero character |
| | | device. |
| | | |
+---+------------------+---------------------------------------------------------------------------------------+
- | 8 | Sink | Output port used to drop all input packets. Similar to Linux kernel /dev/null |
+ | 7 | Sink | Output port used to drop all input packets. Similar to Linux kernel /dev/null |
| | | character device. |
| | | |
+---+------------------+---------------------------------------------------------------------------------------+
- | 9 | Sym_crypto | Output port used to extract DPDK Cryptodev operations from a fixed offset of the |
+ | 8 | Sym_crypto | Output port used to extract DPDK Cryptodev operations from a fixed offset of the |
| | | packet and then enqueue to the Cryptodev PMD. Input port used to dequeue the |
| | | Cryptodev operations from the Cryptodev PMD and then retrieve the packets from them. |
+---+------------------+---------------------------------------------------------------------------------------+
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index ce5a8f0361cb..bb5d23c87669 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -35,7 +35,7 @@ Deprecation Notices
which also added support for standard atomics
(Ref: https://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html)
-* build: Enabling deprecated libraries (``kni``)
+* build: Enabling deprecated libraries
won't be possible anymore through the use of the ``disable_libs`` build option.
A new build option for deprecated libraries will be introduced instead.
@@ -78,13 +78,6 @@ Deprecation Notices
``__atomic_thread_fence`` must be used for patches that need to be merged in
20.08 onwards. This change will not introduce any performance degradation.
-* kni: The KNI kernel module and library are not recommended for use by new
- applications - other technologies such as virtio-user are recommended instead.
- Following the DPDK technical board
- `decision <https://mails.dpdk.org/archives/dev/2021-January/197077.html>`_
- and `refinement <https://mails.dpdk.org/archives/dev/2022-June/243596.html>`_,
- the KNI kernel module, library and PMD will be removed from the DPDK 23.11 release.
-
* lib: will fix extending some enum/define breaking the ABI. There are multiple
samples in DPDK that enum/define terminated with a ``.*MAX.*`` value which is
used by iterators, and arrays holding these values are sized with this
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 9d96dbdcd302..0d5c4a60d020 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -70,6 +70,8 @@ Removed Items
* flow_classify: Removed flow classification library and examples.
+* kni: Removed the Kernel Network Interface (KNI) library and driver.
+
API Changes
-----------
diff --git a/doc/guides/sample_app_ug/ip_pipeline.rst b/doc/guides/sample_app_ug/ip_pipeline.rst
index b521d3b8be20..f30ac5e19db7 100644
--- a/doc/guides/sample_app_ug/ip_pipeline.rst
+++ b/doc/guides/sample_app_ug/ip_pipeline.rst
@@ -164,15 +164,6 @@ Examples
| | | | 8. Pipeline table rule add default |
| | | | 9. Pipeline table rule add |
+-----------------------+----------------------+----------------+------------------------------------+
- | KNI | Stub | Forward | 1. Mempool create |
- | | | | 2. Link create |
- | | | | 3. Pipeline create |
- | | | | 4. Pipeline port in/out |
- | | | | 5. Pipeline table |
- | | | | 6. Pipeline port in table |
- | | | | 7. Pipeline enable |
- | | | | 8. Pipeline table rule add |
- +-----------------------+----------------------+----------------+------------------------------------+
| Firewall | ACL | Allow/Drop | 1. Mempool create |
| | | | 2. Link create |
| | * Key = n-tuple | | 3. Pipeline create |
@@ -297,17 +288,6 @@ Tap
tap <name>
-Kni
-~~~
-
- Create kni port ::
-
- kni <kni_name>
- link <link_name>
- mempool <mempool_name>
- [thread <thread_id>]
-
-
Cryptodev
~~~~~~~~~
@@ -366,7 +346,6 @@ Create pipeline input port ::
| swq <swq_name>
| tmgr <tmgr_name>
| tap <tap_name> mempool <mempool_name> mtu <mtu>
- | kni <kni_name>
| source mempool <mempool_name> file <file_name> bpp <n_bytes_per_pkt>
[action <port_in_action_profile_name>]
[disabled]
@@ -379,7 +358,6 @@ Create pipeline output port ::
| swq <swq_name>
| tmgr <tmgr_name>
| tap <tap_name>
- | kni <kni_name>
| sink [file <file_name> pkts <max_n_pkts>]
Create pipeline table ::
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 4b98faa72980..01b707b6c4ac 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1130,7 +1130,7 @@ nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
{
/* These dummy functions are required for supporting
* some applications which reconfigure queues without
- * stopping tx burst and rx burst threads(eg kni app)
+ * stopping tx burst and rx burst threads.
* When the queues context is saved, txq/rxqs are released
* which caused app crash since rx/tx burst is still
* on different lcores
diff --git a/drivers/net/kni/meson.build b/drivers/net/kni/meson.build
deleted file mode 100644
index 2acc98969426..000000000000
--- a/drivers/net/kni/meson.build
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018 Intel Corporation
-
-if is_windows
- build = false
- reason = 'not supported on Windows'
- subdir_done()
-endif
-
-deps += 'kni'
-sources = files('rte_eth_kni.c')
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
deleted file mode 100644
index c0e1f8db409e..000000000000
--- a/drivers/net/kni/rte_eth_kni.c
+++ /dev/null
@@ -1,524 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
- */
-
-#include <fcntl.h>
-#include <pthread.h>
-#include <unistd.h>
-
-#include <rte_string_fns.h>
-#include <ethdev_driver.h>
-#include <ethdev_vdev.h>
-#include <rte_kni.h>
-#include <rte_kvargs.h>
-#include <rte_malloc.h>
-#include <bus_vdev_driver.h>
-
-/* Only single queue supported */
-#define KNI_MAX_QUEUE_PER_PORT 1
-
-#define MAX_KNI_PORTS 8
-
-#define KNI_ETHER_MTU(mbuf_size) \
- ((mbuf_size) - RTE_ETHER_HDR_LEN) /**< Ethernet MTU. */
-
-#define ETH_KNI_NO_REQUEST_THREAD_ARG "no_request_thread"
-static const char * const valid_arguments[] = {
- ETH_KNI_NO_REQUEST_THREAD_ARG,
- NULL
-};
-
-struct eth_kni_args {
- int no_request_thread;
-};
-
-struct pmd_queue_stats {
- uint64_t pkts;
- uint64_t bytes;
-};
-
-struct pmd_queue {
- struct pmd_internals *internals;
- struct rte_mempool *mb_pool;
-
- struct pmd_queue_stats rx;
- struct pmd_queue_stats tx;
-};
-
-struct pmd_internals {
- struct rte_kni *kni;
- uint16_t port_id;
- int is_kni_started;
-
- pthread_t thread;
- int stop_thread;
- int no_request_thread;
-
- struct rte_ether_addr eth_addr;
-
- struct pmd_queue rx_queues[KNI_MAX_QUEUE_PER_PORT];
- struct pmd_queue tx_queues[KNI_MAX_QUEUE_PER_PORT];
-};
-
-static const struct rte_eth_link pmd_link = {
- .link_speed = RTE_ETH_SPEED_NUM_10G,
- .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
- .link_status = RTE_ETH_LINK_DOWN,
- .link_autoneg = RTE_ETH_LINK_FIXED,
-};
-static int is_kni_initialized;
-
-RTE_LOG_REGISTER_DEFAULT(eth_kni_logtype, NOTICE);
-
-#define PMD_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, eth_kni_logtype, \
- "%s(): " fmt "\n", __func__, ##args)
-static uint16_t
-eth_kni_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
-{
- struct pmd_queue *kni_q = q;
- struct rte_kni *kni = kni_q->internals->kni;
- uint16_t nb_pkts;
- int i;
-
- nb_pkts = rte_kni_rx_burst(kni, bufs, nb_bufs);
- for (i = 0; i < nb_pkts; i++)
- bufs[i]->port = kni_q->internals->port_id;
-
- kni_q->rx.pkts += nb_pkts;
-
- return nb_pkts;
-}
-
-static uint16_t
-eth_kni_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
-{
- struct pmd_queue *kni_q = q;
- struct rte_kni *kni = kni_q->internals->kni;
- uint16_t nb_pkts;
-
- nb_pkts = rte_kni_tx_burst(kni, bufs, nb_bufs);
-
- kni_q->tx.pkts += nb_pkts;
-
- return nb_pkts;
-}
-
-static void *
-kni_handle_request(void *param)
-{
- struct pmd_internals *internals = param;
-#define MS 1000
-
- while (!internals->stop_thread) {
- rte_kni_handle_request(internals->kni);
- usleep(500 * MS);
- }
-
- return param;
-}
-
-static int
-eth_kni_start(struct rte_eth_dev *dev)
-{
- struct pmd_internals *internals = dev->data->dev_private;
- uint16_t port_id = dev->data->port_id;
- struct rte_mempool *mb_pool;
- struct rte_kni_conf conf = {{0}};
- const char *name = dev->device->name + 4; /* remove net_ */
-
- mb_pool = internals->rx_queues[0].mb_pool;
- strlcpy(conf.name, name, RTE_KNI_NAMESIZE);
- conf.force_bind = 0;
- conf.group_id = port_id;
- conf.mbuf_size =
- rte_pktmbuf_data_room_size(mb_pool) - RTE_PKTMBUF_HEADROOM;
- conf.mtu = KNI_ETHER_MTU(conf.mbuf_size);
-
- internals->kni = rte_kni_alloc(mb_pool, &conf, NULL);
- if (internals->kni == NULL) {
- PMD_LOG(ERR,
- "Fail to create kni interface for port: %d",
- port_id);
- return -1;
- }
-
- return 0;
-}
-
-static int
-eth_kni_dev_start(struct rte_eth_dev *dev)
-{
- struct pmd_internals *internals = dev->data->dev_private;
- int ret;
-
- if (internals->is_kni_started == 0) {
- ret = eth_kni_start(dev);
- if (ret)
- return -1;
- internals->is_kni_started = 1;
- }
-
- if (internals->no_request_thread == 0) {
- internals->stop_thread = 0;
-
- ret = rte_ctrl_thread_create(&internals->thread,
- "kni_handle_req", NULL,
- kni_handle_request, internals);
- if (ret) {
- PMD_LOG(ERR,
- "Fail to create kni request thread");
- return -1;
- }
- }
-
- dev->data->dev_link.link_status = 1;
-
- return 0;
-}
-
-static int
-eth_kni_dev_stop(struct rte_eth_dev *dev)
-{
- struct pmd_internals *internals = dev->data->dev_private;
- int ret;
-
- if (internals->no_request_thread == 0 && internals->stop_thread == 0) {
- internals->stop_thread = 1;
-
- ret = pthread_cancel(internals->thread);
- if (ret)
- PMD_LOG(ERR, "Can't cancel the thread");
-
- ret = pthread_join(internals->thread, NULL);
- if (ret)
- PMD_LOG(ERR, "Can't join the thread");
- }
-
- dev->data->dev_link.link_status = 0;
- dev->data->dev_started = 0;
-
- return 0;
-}
-
-static int
-eth_kni_close(struct rte_eth_dev *eth_dev)
-{
- struct pmd_internals *internals;
- int ret;
-
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
-
- ret = eth_kni_dev_stop(eth_dev);
- if (ret)
- PMD_LOG(WARNING, "Not able to stop kni for %s",
- eth_dev->data->name);
-
- /* mac_addrs must not be freed alone because part of dev_private */
- eth_dev->data->mac_addrs = NULL;
-
- internals = eth_dev->data->dev_private;
- ret = rte_kni_release(internals->kni);
- if (ret)
- PMD_LOG(WARNING, "Not able to release kni for %s",
- eth_dev->data->name);
-
- return ret;
-}
-
-static int
-eth_kni_dev_configure(struct rte_eth_dev *dev __rte_unused)
-{
- return 0;
-}
-
-static int
-eth_kni_dev_info(struct rte_eth_dev *dev __rte_unused,
- struct rte_eth_dev_info *dev_info)
-{
- dev_info->max_mac_addrs = 1;
- dev_info->max_rx_pktlen = UINT32_MAX;
- dev_info->max_rx_queues = KNI_MAX_QUEUE_PER_PORT;
- dev_info->max_tx_queues = KNI_MAX_QUEUE_PER_PORT;
- dev_info->min_rx_bufsize = 0;
-
- return 0;
-}
-
-static int
-eth_kni_rx_queue_setup(struct rte_eth_dev *dev,
- uint16_t rx_queue_id,
- uint16_t nb_rx_desc __rte_unused,
- unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
- struct rte_mempool *mb_pool)
-{
- struct pmd_internals *internals = dev->data->dev_private;
- struct pmd_queue *q;
-
- q = &internals->rx_queues[rx_queue_id];
- q->internals = internals;
- q->mb_pool = mb_pool;
-
- dev->data->rx_queues[rx_queue_id] = q;
-
- return 0;
-}
-
-static int
-eth_kni_tx_queue_setup(struct rte_eth_dev *dev,
- uint16_t tx_queue_id,
- uint16_t nb_tx_desc __rte_unused,
- unsigned int socket_id __rte_unused,
- const struct rte_eth_txconf *tx_conf __rte_unused)
-{
- struct pmd_internals *internals = dev->data->dev_private;
- struct pmd_queue *q;
-
- q = &internals->tx_queues[tx_queue_id];
- q->internals = internals;
-
- dev->data->tx_queues[tx_queue_id] = q;
-
- return 0;
-}
-
-static int
-eth_kni_link_update(struct rte_eth_dev *dev __rte_unused,
- int wait_to_complete __rte_unused)
-{
- return 0;
-}
-
-static int
-eth_kni_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
-{
- unsigned long rx_packets_total = 0, rx_bytes_total = 0;
- unsigned long tx_packets_total = 0, tx_bytes_total = 0;
- struct rte_eth_dev_data *data = dev->data;
- unsigned int i, num_stats;
- struct pmd_queue *q;
-
- num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
- data->nb_rx_queues);
- for (i = 0; i < num_stats; i++) {
- q = data->rx_queues[i];
- stats->q_ipackets[i] = q->rx.pkts;
- stats->q_ibytes[i] = q->rx.bytes;
- rx_packets_total += stats->q_ipackets[i];
- rx_bytes_total += stats->q_ibytes[i];
- }
-
- num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
- data->nb_tx_queues);
- for (i = 0; i < num_stats; i++) {
- q = data->tx_queues[i];
- stats->q_opackets[i] = q->tx.pkts;
- stats->q_obytes[i] = q->tx.bytes;
- tx_packets_total += stats->q_opackets[i];
- tx_bytes_total += stats->q_obytes[i];
- }
-
- stats->ipackets = rx_packets_total;
- stats->ibytes = rx_bytes_total;
- stats->opackets = tx_packets_total;
- stats->obytes = tx_bytes_total;
-
- return 0;
-}
-
-static int
-eth_kni_stats_reset(struct rte_eth_dev *dev)
-{
- struct rte_eth_dev_data *data = dev->data;
- struct pmd_queue *q;
- unsigned int i;
-
- for (i = 0; i < data->nb_rx_queues; i++) {
- q = data->rx_queues[i];
- q->rx.pkts = 0;
- q->rx.bytes = 0;
- }
- for (i = 0; i < data->nb_tx_queues; i++) {
- q = data->tx_queues[i];
- q->tx.pkts = 0;
- q->tx.bytes = 0;
- }
-
- return 0;
-}
-
-static const struct eth_dev_ops eth_kni_ops = {
- .dev_start = eth_kni_dev_start,
- .dev_stop = eth_kni_dev_stop,
- .dev_close = eth_kni_close,
- .dev_configure = eth_kni_dev_configure,
- .dev_infos_get = eth_kni_dev_info,
- .rx_queue_setup = eth_kni_rx_queue_setup,
- .tx_queue_setup = eth_kni_tx_queue_setup,
- .link_update = eth_kni_link_update,
- .stats_get = eth_kni_stats_get,
- .stats_reset = eth_kni_stats_reset,
-};
-
-static struct rte_eth_dev *
-eth_kni_create(struct rte_vdev_device *vdev,
- struct eth_kni_args *args,
- unsigned int numa_node)
-{
- struct pmd_internals *internals;
- struct rte_eth_dev_data *data;
- struct rte_eth_dev *eth_dev;
-
- PMD_LOG(INFO, "Creating kni ethdev on numa socket %u",
- numa_node);
-
- /* reserve an ethdev entry */
- eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*internals));
- if (!eth_dev)
- return NULL;
-
- internals = eth_dev->data->dev_private;
- internals->port_id = eth_dev->data->port_id;
- data = eth_dev->data;
- data->nb_rx_queues = 1;
- data->nb_tx_queues = 1;
- data->dev_link = pmd_link;
- data->mac_addrs = &internals->eth_addr;
- data->promiscuous = 1;
- data->all_multicast = 1;
- data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
-
- rte_eth_random_addr(internals->eth_addr.addr_bytes);
-
- eth_dev->dev_ops = ð_kni_ops;
-
- internals->no_request_thread = args->no_request_thread;
-
- return eth_dev;
-}
-
-static int
-kni_init(void)
-{
- int ret;
-
- if (is_kni_initialized == 0) {
- ret = rte_kni_init(MAX_KNI_PORTS);
- if (ret < 0)
- return ret;
- }
-
- is_kni_initialized++;
-
- return 0;
-}
-
-static int
-eth_kni_kvargs_process(struct eth_kni_args *args, const char *params)
-{
- struct rte_kvargs *kvlist;
-
- kvlist = rte_kvargs_parse(params, valid_arguments);
- if (kvlist == NULL)
- return -1;
-
- memset(args, 0, sizeof(struct eth_kni_args));
-
- if (rte_kvargs_count(kvlist, ETH_KNI_NO_REQUEST_THREAD_ARG) == 1)
- args->no_request_thread = 1;
-
- rte_kvargs_free(kvlist);
-
- return 0;
-}
-
-static int
-eth_kni_probe(struct rte_vdev_device *vdev)
-{
- struct rte_eth_dev *eth_dev;
- struct eth_kni_args args;
- const char *name;
- const char *params;
- int ret;
-
- name = rte_vdev_device_name(vdev);
- params = rte_vdev_device_args(vdev);
- PMD_LOG(INFO, "Initializing eth_kni for %s", name);
-
- if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
- eth_dev = rte_eth_dev_attach_secondary(name);
- if (!eth_dev) {
- PMD_LOG(ERR, "Failed to probe %s", name);
- return -1;
- }
- /* TODO: request info from primary to set up Rx and Tx */
- eth_dev->dev_ops = ð_kni_ops;
- eth_dev->device = &vdev->device;
- rte_eth_dev_probing_finish(eth_dev);
- return 0;
- }
-
- ret = eth_kni_kvargs_process(&args, params);
- if (ret < 0)
- return ret;
-
- ret = kni_init();
- if (ret < 0)
- return ret;
-
- eth_dev = eth_kni_create(vdev, &args, rte_socket_id());
- if (eth_dev == NULL)
- goto kni_uninit;
-
- eth_dev->rx_pkt_burst = eth_kni_rx;
- eth_dev->tx_pkt_burst = eth_kni_tx;
-
- rte_eth_dev_probing_finish(eth_dev);
- return 0;
-
-kni_uninit:
- is_kni_initialized--;
- if (is_kni_initialized == 0)
- rte_kni_close();
- return -1;
-}
-
-static int
-eth_kni_remove(struct rte_vdev_device *vdev)
-{
- struct rte_eth_dev *eth_dev;
- const char *name;
- int ret;
-
- name = rte_vdev_device_name(vdev);
- PMD_LOG(INFO, "Un-Initializing eth_kni for %s", name);
-
- /* find the ethdev entry */
- eth_dev = rte_eth_dev_allocated(name);
- if (eth_dev != NULL) {
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- ret = eth_kni_dev_stop(eth_dev);
- if (ret != 0)
- return ret;
- return rte_eth_dev_release_port(eth_dev);
- }
- eth_kni_close(eth_dev);
- rte_eth_dev_release_port(eth_dev);
- }
-
- is_kni_initialized--;
- if (is_kni_initialized == 0)
- rte_kni_close();
-
- return 0;
-}
-
-static struct rte_vdev_driver eth_kni_drv = {
- .probe = eth_kni_probe,
- .remove = eth_kni_remove,
-};
-
-RTE_PMD_REGISTER_VDEV(net_kni, eth_kni_drv);
-RTE_PMD_REGISTER_PARAM_STRING(net_kni, ETH_KNI_NO_REQUEST_THREAD_ARG "=<int>");
diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index f68bbc27a784..bd38b533c573 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -35,7 +35,6 @@ drivers = [
'ionic',
'ipn3ke',
'ixgbe',
- 'kni',
'mana',
'memif',
'mlx4',
diff --git a/examples/ip_pipeline/Makefile b/examples/ip_pipeline/Makefile
index 785c7ee38ce5..bc5e0a9f1800 100644
--- a/examples/ip_pipeline/Makefile
+++ b/examples/ip_pipeline/Makefile
@@ -8,7 +8,6 @@ APP = ip_pipeline
SRCS-y := action.c
SRCS-y += cli.c
SRCS-y += conn.c
-SRCS-y += kni.c
SRCS-y += link.c
SRCS-y += main.c
SRCS-y += mempool.c
diff --git a/examples/ip_pipeline/cli.c b/examples/ip_pipeline/cli.c
index c918f30e06f3..e8269ea90c11 100644
--- a/examples/ip_pipeline/cli.c
+++ b/examples/ip_pipeline/cli.c
@@ -14,7 +14,6 @@
#include "cli.h"
#include "cryptodev.h"
-#include "kni.h"
#include "link.h"
#include "mempool.h"
#include "parser.h"
@@ -728,65 +727,6 @@ cmd_tap(char **tokens,
}
}
-static const char cmd_kni_help[] =
-"kni <kni_name>\n"
-" link <link_name>\n"
-" mempool <mempool_name>\n"
-" [thread <thread_id>]\n";
-
-static void
-cmd_kni(char **tokens,
- uint32_t n_tokens,
- char *out,
- size_t out_size)
-{
- struct kni_params p;
- char *name;
- struct kni *kni;
-
- memset(&p, 0, sizeof(p));
- if ((n_tokens != 6) && (n_tokens != 8)) {
- snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
- return;
- }
-
- name = tokens[1];
-
- if (strcmp(tokens[2], "link") != 0) {
- snprintf(out, out_size, MSG_ARG_NOT_FOUND, "link");
- return;
- }
-
- p.link_name = tokens[3];
-
- if (strcmp(tokens[4], "mempool") != 0) {
- snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mempool");
- return;
- }
-
- p.mempool_name = tokens[5];
-
- if (n_tokens == 8) {
- if (strcmp(tokens[6], "thread") != 0) {
- snprintf(out, out_size, MSG_ARG_NOT_FOUND, "thread");
- return;
- }
-
- if (parser_read_uint32(&p.thread_id, tokens[7]) != 0) {
- snprintf(out, out_size, MSG_ARG_INVALID, "thread_id");
- return;
- }
-
- p.force_bind = 1;
- } else
- p.force_bind = 0;
-
- kni = kni_create(name, &p);
- if (kni == NULL) {
- snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
- return;
- }
-}
static const char cmd_cryptodev_help[] =
"cryptodev <cryptodev_name>\n"
@@ -1541,7 +1481,6 @@ static const char cmd_pipeline_port_in_help[] =
" | swq <swq_name>\n"
" | tmgr <tmgr_name>\n"
" | tap <tap_name> mempool <mempool_name> mtu <mtu>\n"
-" | kni <kni_name>\n"
" | source mempool <mempool_name> file <file_name> bpp <n_bytes_per_pkt>\n"
" | cryptodev <cryptodev_name> rxq <queue_id>\n"
" [action <port_in_action_profile_name>]\n"
@@ -1664,18 +1603,6 @@ cmd_pipeline_port_in(char **tokens,
}
t0 += 6;
- } else if (strcmp(tokens[t0], "kni") == 0) {
- if (n_tokens < t0 + 2) {
- snprintf(out, out_size, MSG_ARG_MISMATCH,
- "pipeline port in kni");
- return;
- }
-
- p.type = PORT_IN_KNI;
-
- p.dev_name = tokens[t0 + 1];
-
- t0 += 2;
} else if (strcmp(tokens[t0], "source") == 0) {
if (n_tokens < t0 + 6) {
snprintf(out, out_size, MSG_ARG_MISMATCH,
@@ -1781,7 +1708,6 @@ static const char cmd_pipeline_port_out_help[] =
" | swq <swq_name>\n"
" | tmgr <tmgr_name>\n"
" | tap <tap_name>\n"
-" | kni <kni_name>\n"
" | sink [file <file_name> pkts <max_n_pkts>]\n"
" | cryptodev <cryptodev_name> txq <txq_id> offset <crypto_op_offset>\n";
@@ -1873,16 +1799,6 @@ cmd_pipeline_port_out(char **tokens,
p.type = PORT_OUT_TAP;
- p.dev_name = tokens[7];
- } else if (strcmp(tokens[6], "kni") == 0) {
- if (n_tokens != 8) {
- snprintf(out, out_size, MSG_ARG_MISMATCH,
- "pipeline port out kni");
- return;
- }
-
- p.type = PORT_OUT_KNI;
-
p.dev_name = tokens[7];
} else if (strcmp(tokens[6], "sink") == 0) {
if ((n_tokens != 7) && (n_tokens != 11)) {
@@ -6038,7 +5954,6 @@ cmd_help(char **tokens, uint32_t n_tokens, char *out, size_t out_size)
"\ttmgr subport\n"
"\ttmgr subport pipe\n"
"\ttap\n"
- "\tkni\n"
"\tport in action profile\n"
"\ttable action profile\n"
"\tpipeline\n"
@@ -6124,11 +6039,6 @@ cmd_help(char **tokens, uint32_t n_tokens, char *out, size_t out_size)
return;
}
- if (strcmp(tokens[0], "kni") == 0) {
- snprintf(out, out_size, "\n%s\n", cmd_kni_help);
- return;
- }
-
if (strcmp(tokens[0], "cryptodev") == 0) {
snprintf(out, out_size, "\n%s\n", cmd_cryptodev_help);
return;
@@ -6436,11 +6346,6 @@ cli_process(char *in, char *out, size_t out_size)
return;
}
- if (strcmp(tokens[0], "kni") == 0) {
- cmd_kni(tokens, n_tokens, out, out_size);
- return;
- }
-
if (strcmp(tokens[0], "cryptodev") == 0) {
cmd_cryptodev(tokens, n_tokens, out, out_size);
return;
diff --git a/examples/ip_pipeline/examples/kni.cli b/examples/ip_pipeline/examples/kni.cli
deleted file mode 100644
index 143834093d4d..000000000000
--- a/examples/ip_pipeline/examples/kni.cli
+++ /dev/null
@@ -1,69 +0,0 @@
-; SPDX-License-Identifier: BSD-3-Clause
-; Copyright(c) 2010-2018 Intel Corporation
-
-; _______________ ______________________
-; | | KNI0 | |
-; LINK0 RXQ0 --->|...............|------->|--+ |
-; | | KNI1 | | br0 |
-; LINK1 TXQ0 <---|...............|<-------|<-+ |
-; | | | Linux Kernel |
-; | PIPELINE0 | | Network Stack |
-; | | KNI1 | |
-; LINK1 RXQ0 --->|...............|------->|--+ |
-; | | KNI0 | | br0 |
-; LINK0 TXQ0 <---|...............|<-------|<-+ |
-; |_______________| |______________________|
-;
-; Insert Linux kernel KNI module:
-; [Linux]$ insmod rte_kni.ko
-;
-; Configure Linux kernel bridge between KNI0 and KNI1 interfaces:
-; [Linux]$ brctl addbr br0
-; [Linux]$ brctl addif br0 KNI0
-; [Linux]$ brctl addif br0 KNI1
-; [Linux]$ ifconfig br0 up
-; [Linux]$ ifconfig KNI0 up
-; [Linux]$ ifconfig KNI1 up
-;
-; Monitor packet forwarding performed by Linux kernel between KNI0 and KNI1:
-; [Linux]$ tcpdump -i KNI0
-; [Linux]$ tcpdump -i KNI1
-
-mempool MEMPOOL0 buffer 2304 pool 32K cache 256 cpu 0
-
-link LINK0 dev 0000:02:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
-link LINK1 dev 0000:02:00.1 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
-
-kni KNI0 link LINK0 mempool MEMPOOL0
-kni KNI1 link LINK1 mempool MEMPOOL0
-
-table action profile AP0 ipv4 offset 270 fwd
-
-pipeline PIPELINE0 period 10 offset_port_id 0 cpu 0
-
-pipeline PIPELINE0 port in bsz 32 link LINK0 rxq 0
-pipeline PIPELINE0 port in bsz 32 kni KNI1
-pipeline PIPELINE0 port in bsz 32 link LINK1 rxq 0
-pipeline PIPELINE0 port in bsz 32 kni KNI0
-
-pipeline PIPELINE0 port out bsz 32 kni KNI0
-pipeline PIPELINE0 port out bsz 32 link LINK1 txq 0
-pipeline PIPELINE0 port out bsz 32 kni KNI1
-pipeline PIPELINE0 port out bsz 32 link LINK0 txq 0
-
-pipeline PIPELINE0 table match stub action AP0
-pipeline PIPELINE0 table match stub action AP0
-pipeline PIPELINE0 table match stub action AP0
-pipeline PIPELINE0 table match stub action AP0
-
-pipeline PIPELINE0 port in 0 table 0
-pipeline PIPELINE0 port in 1 table 1
-pipeline PIPELINE0 port in 2 table 2
-pipeline PIPELINE0 port in 3 table 3
-
-thread 1 pipeline PIPELINE0 enable
-
-pipeline PIPELINE0 table 0 rule add match default action fwd port 0
-pipeline PIPELINE0 table 1 rule add match default action fwd port 1
-pipeline PIPELINE0 table 2 rule add match default action fwd port 2
-pipeline PIPELINE0 table 3 rule add match default action fwd port 3
diff --git a/examples/ip_pipeline/kni.c b/examples/ip_pipeline/kni.c
deleted file mode 100644
index cd02c3947827..000000000000
--- a/examples/ip_pipeline/kni.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2018 Intel Corporation
- */
-
-#include <stdlib.h>
-#include <string.h>
-
-#include <rte_ethdev.h>
-#include <rte_string_fns.h>
-
-#include "kni.h"
-#include "mempool.h"
-#include "link.h"
-
-static struct kni_list kni_list;
-
-#ifndef KNI_MAX
-#define KNI_MAX 16
-#endif
-
-int
-kni_init(void)
-{
- TAILQ_INIT(&kni_list);
-
-#ifdef RTE_LIB_KNI
- rte_kni_init(KNI_MAX);
-#endif
-
- return 0;
-}
-
-struct kni *
-kni_find(const char *name)
-{
- struct kni *kni;
-
- if (name == NULL)
- return NULL;
-
- TAILQ_FOREACH(kni, &kni_list, node)
- if (strcmp(kni->name, name) == 0)
- return kni;
-
- return NULL;
-}
-
-#ifndef RTE_LIB_KNI
-
-struct kni *
-kni_create(const char *name __rte_unused,
- struct kni_params *params __rte_unused)
-{
- return NULL;
-}
-
-void
-kni_handle_request(void)
-{
- return;
-}
-
-#else
-
-static int
-kni_config_network_interface(uint16_t port_id, uint8_t if_up)
-{
- int ret = 0;
-
- if (!rte_eth_dev_is_valid_port(port_id))
- return -EINVAL;
-
- ret = (if_up) ?
- rte_eth_dev_set_link_up(port_id) :
- rte_eth_dev_set_link_down(port_id);
-
- return ret;
-}
-
-static int
-kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
-{
- int ret;
-
- if (!rte_eth_dev_is_valid_port(port_id))
- return -EINVAL;
-
- if (new_mtu > RTE_ETHER_MAX_LEN)
- return -EINVAL;
-
- /* Set new MTU */
- ret = rte_eth_dev_set_mtu(port_id, new_mtu);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-struct kni *
-kni_create(const char *name, struct kni_params *params)
-{
- struct rte_eth_dev_info dev_info;
- struct rte_kni_conf kni_conf;
- struct rte_kni_ops kni_ops;
- struct kni *kni;
- struct mempool *mempool;
- struct link *link;
- struct rte_kni *k;
- int ret;
-
- /* Check input params */
- if ((name == NULL) ||
- kni_find(name) ||
- (params == NULL))
- return NULL;
-
- mempool = mempool_find(params->mempool_name);
- link = link_find(params->link_name);
- if ((mempool == NULL) ||
- (link == NULL))
- return NULL;
-
- /* Resource create */
- ret = rte_eth_dev_info_get(link->port_id, &dev_info);
- if (ret != 0)
- return NULL;
-
- memset(&kni_conf, 0, sizeof(kni_conf));
- strlcpy(kni_conf.name, name, RTE_KNI_NAMESIZE);
- kni_conf.force_bind = params->force_bind;
- kni_conf.core_id = params->thread_id;
- kni_conf.group_id = link->port_id;
- kni_conf.mbuf_size = mempool->buffer_size;
-
- memset(&kni_ops, 0, sizeof(kni_ops));
- kni_ops.port_id = link->port_id;
- kni_ops.config_network_if = kni_config_network_interface;
- kni_ops.change_mtu = kni_change_mtu;
-
- k = rte_kni_alloc(mempool->m, &kni_conf, &kni_ops);
- if (k == NULL)
- return NULL;
-
- /* Node allocation */
- kni = calloc(1, sizeof(struct kni));
- if (kni == NULL)
- return NULL;
-
- /* Node fill in */
- strlcpy(kni->name, name, sizeof(kni->name));
- kni->k = k;
-
- /* Node add to list */
- TAILQ_INSERT_TAIL(&kni_list, kni, node);
-
- return kni;
-}
-
-void
-kni_handle_request(void)
-{
- struct kni *kni;
-
- TAILQ_FOREACH(kni, &kni_list, node)
- rte_kni_handle_request(kni->k);
-}
-
-#endif
diff --git a/examples/ip_pipeline/kni.h b/examples/ip_pipeline/kni.h
deleted file mode 100644
index 118f48df73d8..000000000000
--- a/examples/ip_pipeline/kni.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2018 Intel Corporation
- */
-
-#ifndef _INCLUDE_KNI_H_
-#define _INCLUDE_KNI_H_
-
-#include <stdint.h>
-#include <sys/queue.h>
-
-#ifdef RTE_LIB_KNI
-#include <rte_kni.h>
-#endif
-
-#include "common.h"
-
-struct kni {
- TAILQ_ENTRY(kni) node;
- char name[NAME_SIZE];
-#ifdef RTE_LIB_KNI
- struct rte_kni *k;
-#endif
-};
-
-TAILQ_HEAD(kni_list, kni);
-
-int
-kni_init(void);
-
-struct kni *
-kni_find(const char *name);
-
-struct kni_params {
- const char *link_name;
- const char *mempool_name;
- int force_bind;
- uint32_t thread_id;
-};
-
-struct kni *
-kni_create(const char *name, struct kni_params *params);
-
-void
-kni_handle_request(void);
-
-#endif /* _INCLUDE_KNI_H_ */
diff --git a/examples/ip_pipeline/main.c b/examples/ip_pipeline/main.c
index e35d9bce3984..663f538f024a 100644
--- a/examples/ip_pipeline/main.c
+++ b/examples/ip_pipeline/main.c
@@ -14,7 +14,6 @@
#include "cli.h"
#include "conn.h"
-#include "kni.h"
#include "cryptodev.h"
#include "link.h"
#include "mempool.h"
@@ -205,13 +204,6 @@ main(int argc, char **argv)
return status;
}
- /* KNI */
- status = kni_init();
- if (status) {
- printf("Error: KNI initialization failed (%d)\n", status);
- return status;
- }
-
/* Sym Crypto */
status = cryptodev_init();
if (status) {
@@ -264,7 +256,5 @@ main(int argc, char **argv)
conn_poll_for_conn(conn);
conn_poll_for_msg(conn);
-
- kni_handle_request();
}
}
diff --git a/examples/ip_pipeline/meson.build b/examples/ip_pipeline/meson.build
index 57f522c24cf9..68049157e429 100644
--- a/examples/ip_pipeline/meson.build
+++ b/examples/ip_pipeline/meson.build
@@ -18,7 +18,6 @@ sources = files(
'cli.c',
'conn.c',
'cryptodev.c',
- 'kni.c',
'link.c',
'main.c',
'mempool.c',
diff --git a/examples/ip_pipeline/pipeline.c b/examples/ip_pipeline/pipeline.c
index 7ebabcae984d..63352257c6e9 100644
--- a/examples/ip_pipeline/pipeline.c
+++ b/examples/ip_pipeline/pipeline.c
@@ -11,9 +11,6 @@
#include <rte_string_fns.h>
#include <rte_port_ethdev.h>
-#ifdef RTE_LIB_KNI
-#include <rte_port_kni.h>
-#endif
#include <rte_port_ring.h>
#include <rte_port_source_sink.h>
#include <rte_port_fd.h>
@@ -28,9 +25,6 @@
#include <rte_table_lpm_ipv6.h>
#include <rte_table_stub.h>
-#ifdef RTE_LIB_KNI
-#include "kni.h"
-#endif
#include "link.h"
#include "mempool.h"
#include "pipeline.h"
@@ -160,9 +154,6 @@ pipeline_port_in_create(const char *pipeline_name,
struct rte_port_ring_reader_params ring;
struct rte_port_sched_reader_params sched;
struct rte_port_fd_reader_params fd;
-#ifdef RTE_LIB_KNI
- struct rte_port_kni_reader_params kni;
-#endif
struct rte_port_source_params source;
struct rte_port_sym_crypto_reader_params sym_crypto;
} pp;
@@ -264,22 +255,6 @@ pipeline_port_in_create(const char *pipeline_name,
break;
}
-#ifdef RTE_LIB_KNI
- case PORT_IN_KNI:
- {
- struct kni *kni;
-
- kni = kni_find(params->dev_name);
- if (kni == NULL)
- return -1;
-
- pp.kni.kni = kni->k;
-
- p.ops = &rte_port_kni_reader_ops;
- p.arg_create = &pp.kni;
- break;
- }
-#endif
case PORT_IN_SOURCE:
{
@@ -404,9 +379,6 @@ pipeline_port_out_create(const char *pipeline_name,
struct rte_port_ring_writer_params ring;
struct rte_port_sched_writer_params sched;
struct rte_port_fd_writer_params fd;
-#ifdef RTE_LIB_KNI
- struct rte_port_kni_writer_params kni;
-#endif
struct rte_port_sink_params sink;
struct rte_port_sym_crypto_writer_params sym_crypto;
} pp;
@@ -415,9 +387,6 @@ pipeline_port_out_create(const char *pipeline_name,
struct rte_port_ethdev_writer_nodrop_params ethdev;
struct rte_port_ring_writer_nodrop_params ring;
struct rte_port_fd_writer_nodrop_params fd;
-#ifdef RTE_LIB_KNI
- struct rte_port_kni_writer_nodrop_params kni;
-#endif
struct rte_port_sym_crypto_writer_nodrop_params sym_crypto;
} pp_nodrop;
@@ -537,32 +506,6 @@ pipeline_port_out_create(const char *pipeline_name,
break;
}
-#ifdef RTE_LIB_KNI
- case PORT_OUT_KNI:
- {
- struct kni *kni;
-
- kni = kni_find(params->dev_name);
- if (kni == NULL)
- return -1;
-
- pp.kni.kni = kni->k;
- pp.kni.tx_burst_sz = params->burst_size;
-
- pp_nodrop.kni.kni = kni->k;
- pp_nodrop.kni.tx_burst_sz = params->burst_size;
- pp_nodrop.kni.n_retries = params->n_retries;
-
- if (params->retry == 0) {
- p.ops = &rte_port_kni_writer_ops;
- p.arg_create = &pp.kni;
- } else {
- p.ops = &rte_port_kni_writer_nodrop_ops;
- p.arg_create = &pp_nodrop.kni;
- }
- break;
- }
-#endif
case PORT_OUT_SINK:
{
diff --git a/examples/ip_pipeline/pipeline.h b/examples/ip_pipeline/pipeline.h
index 4d2ee29a54c7..083d5e852421 100644
--- a/examples/ip_pipeline/pipeline.h
+++ b/examples/ip_pipeline/pipeline.h
@@ -25,7 +25,6 @@ enum port_in_type {
PORT_IN_SWQ,
PORT_IN_TMGR,
PORT_IN_TAP,
- PORT_IN_KNI,
PORT_IN_SOURCE,
PORT_IN_CRYPTODEV,
};
@@ -67,7 +66,6 @@ enum port_out_type {
PORT_OUT_SWQ,
PORT_OUT_TMGR,
PORT_OUT_TAP,
- PORT_OUT_KNI,
PORT_OUT_SINK,
PORT_OUT_CRYPTODEV,
};
diff --git a/kernel/linux/kni/Kbuild b/kernel/linux/kni/Kbuild
deleted file mode 100644
index e5452d6c00db..000000000000
--- a/kernel/linux/kni/Kbuild
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
-
-ccflags-y := $(MODULE_CFLAGS)
-obj-m := rte_kni.o
-rte_kni-y := $(patsubst $(src)/%.c,%.o,$(wildcard $(src)/*.c))
diff --git a/kernel/linux/kni/compat.h b/kernel/linux/kni/compat.h
deleted file mode 100644
index 8beb67046577..000000000000
--- a/kernel/linux/kni/compat.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Minimal wrappers to allow compiling kni on older kernels.
- */
-
-#include <linux/version.h>
-
-#ifndef RHEL_RELEASE_VERSION
-#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b))
-#endif
-
-/* SuSE version macro is the same as Linux kernel version */
-#ifndef SLE_VERSION
-#define SLE_VERSION(a, b, c) KERNEL_VERSION(a, b, c)
-#endif
-#ifdef CONFIG_SUSE_KERNEL
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 57))
-/* SLES12SP3 is at least 4.4.57+ based */
-#define SLE_VERSION_CODE SLE_VERSION(12, 3, 0)
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 28))
-/* SLES12 is at least 3.12.28+ based */
-#define SLE_VERSION_CODE SLE_VERSION(12, 0, 0)
-#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 61)) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)))
-/* SLES11 SP3 is at least 3.0.61+ based */
-#define SLE_VERSION_CODE SLE_VERSION(11, 3, 0)
-#elif (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 32))
-/* SLES11 SP1 is 2.6.32 based */
-#define SLE_VERSION_CODE SLE_VERSION(11, 1, 0)
-#elif (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 27))
-/* SLES11 GA is 2.6.27 based */
-#define SLE_VERSION_CODE SLE_VERSION(11, 0, 0)
-#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
-#endif /* CONFIG_SUSE_KERNEL */
-#ifndef SLE_VERSION_CODE
-#define SLE_VERSION_CODE 0
-#endif /* SLE_VERSION_CODE */
-
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && \
- (!(defined(RHEL_RELEASE_CODE) && \
- RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)))
-
-#define kstrtoul strict_strtoul
-
-#endif /* < 2.6.39 */
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33)
-#define HAVE_SIMPLIFIED_PERNET_OPERATIONS
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
-#define sk_sleep(s) ((s)->sk_sleep)
-#else
-#define HAVE_SOCKET_WQ
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
-#define HAVE_STATIC_SOCK_MAP_FD
-#else
-#define kni_sock_map_fd(s) sock_map_fd(s, 0)
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
-#define HAVE_CHANGE_CARRIER_CB
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
-#define ether_addr_copy(dst, src) memcpy(dst, src, ETH_ALEN)
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
-#define HAVE_IOV_ITER_MSGHDR
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)
-#define HAVE_KIOCB_MSG_PARAM
-#define HAVE_REBUILD_HEADER
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
-#define HAVE_SK_ALLOC_KERN_PARAM
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) || \
- (defined(RHEL_RELEASE_CODE) && \
- RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) || \
- (SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(12, 3, 0))
-#define HAVE_TRANS_START_HELPER
-#endif
-
-/*
- * KNI uses NET_NAME_UNKNOWN macro to select correct version of alloc_netdev()
- * For old kernels just backported the commit that enables the macro
- * (685343fc3ba6) but still uses old API, it is required to undefine macro to
- * select correct version of API, this is safe since KNI doesn't use the value.
- * This fix is specific to RedHat/CentOS kernels.
- */
-#if (defined(RHEL_RELEASE_CODE) && \
- (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 8)) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)))
-#undef NET_NAME_UNKNOWN
-#endif
-
-/*
- * RHEL has two different version with different kernel version:
- * 3.10 is for AMD, Intel, IBM POWER7 and POWER8;
- * 4.14 is for ARM and IBM POWER9
- */
-#if (defined(RHEL_RELEASE_CODE) && \
- (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5)) && \
- (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0)) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)))
-#define ndo_change_mtu ndo_change_mtu_rh74
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
-#define HAVE_MAX_MTU_PARAM
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
-#define HAVE_SIGNAL_FUNCTIONS_OWN_HEADER
-#endif
-
-/*
- * iova to kva mapping support can be provided since 4.6.0, but required
- * kernel version increased to >= 4.10.0 because of the updates in
- * get_user_pages_remote() kernel API
- */
-#if KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE
-#define HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
-#endif
-
-#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE || \
- (defined(RHEL_RELEASE_CODE) && \
- RHEL_RELEASE_VERSION(8, 3) <= RHEL_RELEASE_CODE) || \
- (defined(CONFIG_SUSE_KERNEL) && defined(HAVE_ARG_TX_QUEUE))
-#define HAVE_TX_TIMEOUT_TXQUEUE
-#endif
-
-#if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
-#define HAVE_TSK_IN_GUP
-#endif
-
-#if KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE
-#define HAVE_ETH_HW_ADDR_SET
-#endif
-
-#if KERNEL_VERSION(5, 18, 0) > LINUX_VERSION_CODE && \
- (!(defined(RHEL_RELEASE_CODE) && \
- RHEL_RELEASE_VERSION(9, 1) <= RHEL_RELEASE_CODE))
-#define HAVE_NETIF_RX_NI
-#endif
-
-#if KERNEL_VERSION(6, 5, 0) > LINUX_VERSION_CODE
-#define HAVE_VMA_IN_GUP
-#endif
diff --git a/kernel/linux/kni/kni_dev.h b/kernel/linux/kni/kni_dev.h
deleted file mode 100644
index 975379825b2d..000000000000
--- a/kernel/linux/kni/kni_dev.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright(c) 2010-2014 Intel Corporation.
- */
-
-#ifndef _KNI_DEV_H_
-#define _KNI_DEV_H_
-
-#ifdef pr_fmt
-#undef pr_fmt
-#endif
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#define KNI_VERSION "1.0"
-
-#include "compat.h"
-
-#include <linux/if.h>
-#include <linux/wait.h>
-#ifdef HAVE_SIGNAL_FUNCTIONS_OWN_HEADER
-#include <linux/sched/signal.h>
-#else
-#include <linux/sched.h>
-#endif
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
-#include <rte_kni_common.h>
-#define KNI_KTHREAD_MAX_RESCHEDULE_INTERVAL 1000000 /* us */
-
-#define MBUF_BURST_SZ 32
-
-/* Default carrier state for created KNI network interfaces */
-extern uint32_t kni_dflt_carrier;
-
-/* Request processing support for bifurcated drivers. */
-extern uint32_t bifurcated_support;
-
-/**
- * A structure describing the private information for a kni device.
- */
-struct kni_dev {
- /* kni list */
- struct list_head list;
-
- uint8_t iova_mode;
-
- uint32_t core_id; /* Core ID to bind */
- char name[RTE_KNI_NAMESIZE]; /* Network device name */
- struct task_struct *pthread;
-
- /* wait queue for req/resp */
- wait_queue_head_t wq;
- struct mutex sync_lock;
-
- /* kni device */
- struct net_device *net_dev;
-
- /* queue for packets to be sent out */
- struct rte_kni_fifo *tx_q;
-
- /* queue for the packets received */
- struct rte_kni_fifo *rx_q;
-
- /* queue for the allocated mbufs those can be used to save sk buffs */
- struct rte_kni_fifo *alloc_q;
-
- /* free queue for the mbufs to be freed */
- struct rte_kni_fifo *free_q;
-
- /* request queue */
- struct rte_kni_fifo *req_q;
-
- /* response queue */
- struct rte_kni_fifo *resp_q;
-
- void *sync_kva;
- void *sync_va;
-
- void *mbuf_kva;
- void *mbuf_va;
-
- /* mbuf size */
- uint32_t mbuf_size;
-
- /* buffers */
- void *pa[MBUF_BURST_SZ];
- void *va[MBUF_BURST_SZ];
- void *alloc_pa[MBUF_BURST_SZ];
- void *alloc_va[MBUF_BURST_SZ];
-
- struct task_struct *usr_tsk;
-};
-
-#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
-static inline phys_addr_t iova_to_phys(struct task_struct *tsk,
- unsigned long iova)
-{
- phys_addr_t offset, phys_addr;
- struct page *page = NULL;
- long ret;
-
- offset = iova & (PAGE_SIZE - 1);
-
- /* Read one page struct info */
-#ifdef HAVE_TSK_IN_GUP
- ret = get_user_pages_remote(tsk, tsk->mm, iova, 1, 0, &page, NULL, NULL);
-#else
- #ifdef HAVE_VMA_IN_GUP
- ret = get_user_pages_remote(tsk->mm, iova, 1, 0, &page, NULL, NULL);
- #else
- ret = get_user_pages_remote(tsk->mm, iova, 1, 0, &page, NULL);
- #endif
-#endif
- if (ret < 0)
- return 0;
-
- phys_addr = page_to_phys(page) | offset;
- put_page(page);
-
- return phys_addr;
-}
-
-static inline void *iova_to_kva(struct task_struct *tsk, unsigned long iova)
-{
- return phys_to_virt(iova_to_phys(tsk, iova));
-}
-#endif
-
-void kni_net_release_fifo_phy(struct kni_dev *kni);
-void kni_net_rx(struct kni_dev *kni);
-void kni_net_init(struct net_device *dev);
-void kni_net_config_lo_mode(char *lo_str);
-void kni_net_poll_resp(struct kni_dev *kni);
-
-#endif
diff --git a/kernel/linux/kni/kni_fifo.h b/kernel/linux/kni/kni_fifo.h
deleted file mode 100644
index 1ba5172002b6..000000000000
--- a/kernel/linux/kni/kni_fifo.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright(c) 2010-2014 Intel Corporation.
- */
-
-#ifndef _KNI_FIFO_H_
-#define _KNI_FIFO_H_
-
-#include <rte_kni_common.h>
-
-/* Skip some memory barriers on Linux < 3.14 */
-#ifndef smp_load_acquire
-#define smp_load_acquire(a) (*(a))
-#endif
-#ifndef smp_store_release
-#define smp_store_release(a, b) *(a) = (b)
-#endif
-
-/**
- * Adds num elements into the fifo. Return the number actually written
- */
-static inline uint32_t
-kni_fifo_put(struct rte_kni_fifo *fifo, void **data, uint32_t num)
-{
- uint32_t i = 0;
- uint32_t fifo_write = fifo->write;
- uint32_t fifo_read = smp_load_acquire(&fifo->read);
- uint32_t new_write = fifo_write;
-
- for (i = 0; i < num; i++) {
- new_write = (new_write + 1) & (fifo->len - 1);
-
- if (new_write == fifo_read)
- break;
- fifo->buffer[fifo_write] = data[i];
- fifo_write = new_write;
- }
- smp_store_release(&fifo->write, fifo_write);
-
- return i;
-}
-
-/**
- * Get up to num elements from the FIFO. Return the number actually read
- */
-static inline uint32_t
-kni_fifo_get(struct rte_kni_fifo *fifo, void **data, uint32_t num)
-{
- uint32_t i = 0;
- uint32_t new_read = fifo->read;
- uint32_t fifo_write = smp_load_acquire(&fifo->write);
-
- for (i = 0; i < num; i++) {
- if (new_read == fifo_write)
- break;
-
- data[i] = fifo->buffer[new_read];
- new_read = (new_read + 1) & (fifo->len - 1);
- }
- smp_store_release(&fifo->read, new_read);
-
- return i;
-}
-
-/**
- * Get the num of elements in the fifo
- */
-static inline uint32_t
-kni_fifo_count(struct rte_kni_fifo *fifo)
-{
- uint32_t fifo_write = smp_load_acquire(&fifo->write);
- uint32_t fifo_read = smp_load_acquire(&fifo->read);
- return (fifo->len + fifo_write - fifo_read) & (fifo->len - 1);
-}
-
-/**
- * Get the num of available elements in the fifo
- */
-static inline uint32_t
-kni_fifo_free_count(struct rte_kni_fifo *fifo)
-{
- uint32_t fifo_write = smp_load_acquire(&fifo->write);
- uint32_t fifo_read = smp_load_acquire(&fifo->read);
- return (fifo_read - fifo_write - 1) & (fifo->len - 1);
-}
-
-#endif /* _KNI_FIFO_H_ */
diff --git a/kernel/linux/kni/kni_misc.c b/kernel/linux/kni/kni_misc.c
deleted file mode 100644
index 0c3a86ee352e..000000000000
--- a/kernel/linux/kni/kni_misc.c
+++ /dev/null
@@ -1,719 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright(c) 2010-2014 Intel Corporation.
- */
-
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/miscdevice.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/pci.h>
-#include <linux/kthread.h>
-#include <linux/rwsem.h>
-#include <linux/mutex.h>
-#include <linux/nsproxy.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
-
-#include <rte_kni_common.h>
-
-#include "compat.h"
-#include "kni_dev.h"
-
-MODULE_VERSION(KNI_VERSION);
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Kernel Module for managing kni devices");
-
-#define KNI_RX_LOOP_NUM 1000
-
-#define KNI_MAX_DEVICES 32
-
-/* loopback mode */
-static char *lo_mode;
-
-/* Kernel thread mode */
-static char *kthread_mode;
-static uint32_t multiple_kthread_on;
-
-/* Default carrier state for created KNI network interfaces */
-static char *carrier;
-uint32_t kni_dflt_carrier;
-
-/* Request processing support for bifurcated drivers. */
-static char *enable_bifurcated;
-uint32_t bifurcated_support;
-
-/* KNI thread scheduling interval */
-static long min_scheduling_interval = 100; /* us */
-static long max_scheduling_interval = 200; /* us */
-
-#define KNI_DEV_IN_USE_BIT_NUM 0 /* Bit number for device in use */
-
-static int kni_net_id;
-
-struct kni_net {
- unsigned long device_in_use; /* device in use flag */
- struct mutex kni_kthread_lock;
- struct task_struct *kni_kthread;
- struct rw_semaphore kni_list_lock;
- struct list_head kni_list_head;
-};
-
-static int __net_init
-kni_init_net(struct net *net)
-{
-#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
- struct kni_net *knet = net_generic(net, kni_net_id);
-
- memset(knet, 0, sizeof(*knet));
-#else
- struct kni_net *knet;
- int ret;
-
- knet = kzalloc(sizeof(struct kni_net), GFP_KERNEL);
- if (!knet) {
- ret = -ENOMEM;
- return ret;
- }
-#endif
-
- /* Clear the bit of device in use */
- clear_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use);
-
- mutex_init(&knet->kni_kthread_lock);
-
- init_rwsem(&knet->kni_list_lock);
- INIT_LIST_HEAD(&knet->kni_list_head);
-
-#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
- return 0;
-#else
- ret = net_assign_generic(net, kni_net_id, knet);
- if (ret < 0)
- kfree(knet);
-
- return ret;
-#endif
-}
-
-static void __net_exit
-kni_exit_net(struct net *net)
-{
- struct kni_net *knet __maybe_unused;
-
- knet = net_generic(net, kni_net_id);
- mutex_destroy(&knet->kni_kthread_lock);
-
-#ifndef HAVE_SIMPLIFIED_PERNET_OPERATIONS
- kfree(knet);
-#endif
-}
-
-static struct pernet_operations kni_net_ops = {
- .init = kni_init_net,
- .exit = kni_exit_net,
-#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
- .id = &kni_net_id,
- .size = sizeof(struct kni_net),
-#endif
-};
-
-static int
-kni_thread_single(void *data)
-{
- struct kni_net *knet = data;
- int j;
- struct kni_dev *dev;
-
- while (!kthread_should_stop()) {
- down_read(&knet->kni_list_lock);
- for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
- list_for_each_entry(dev, &knet->kni_list_head, list) {
- kni_net_rx(dev);
- kni_net_poll_resp(dev);
- }
- }
- up_read(&knet->kni_list_lock);
- /* reschedule out for a while */
- usleep_range(min_scheduling_interval, max_scheduling_interval);
- }
-
- return 0;
-}
-
-static int
-kni_thread_multiple(void *param)
-{
- int j;
- struct kni_dev *dev = param;
-
- while (!kthread_should_stop()) {
- for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
- kni_net_rx(dev);
- kni_net_poll_resp(dev);
- }
- usleep_range(min_scheduling_interval, max_scheduling_interval);
- }
-
- return 0;
-}
-
-static int
-kni_open(struct inode *inode, struct file *file)
-{
- struct net *net = current->nsproxy->net_ns;
- struct kni_net *knet = net_generic(net, kni_net_id);
-
- /* kni device can be opened by one user only per netns */
- if (test_and_set_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use))
- return -EBUSY;
-
- file->private_data = get_net(net);
- pr_debug("/dev/kni opened\n");
-
- return 0;
-}
-
-static int
-kni_dev_remove(struct kni_dev *dev)
-{
- if (!dev)
- return -ENODEV;
-
- /*
- * The memory of kni device is allocated and released together
- * with net device. Release mbuf before freeing net device.
- */
- kni_net_release_fifo_phy(dev);
-
- if (dev->net_dev) {
- unregister_netdev(dev->net_dev);
- free_netdev(dev->net_dev);
- }
-
- return 0;
-}
-
-static int
-kni_release(struct inode *inode, struct file *file)
-{
- struct net *net = file->private_data;
- struct kni_net *knet = net_generic(net, kni_net_id);
- struct kni_dev *dev, *n;
-
- /* Stop kernel thread for single mode */
- if (multiple_kthread_on == 0) {
- mutex_lock(&knet->kni_kthread_lock);
- /* Stop kernel thread */
- if (knet->kni_kthread != NULL) {
- kthread_stop(knet->kni_kthread);
- knet->kni_kthread = NULL;
- }
- mutex_unlock(&knet->kni_kthread_lock);
- }
-
- down_write(&knet->kni_list_lock);
- list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {
- /* Stop kernel thread for multiple mode */
- if (multiple_kthread_on && dev->pthread != NULL) {
- kthread_stop(dev->pthread);
- dev->pthread = NULL;
- }
-
- list_del(&dev->list);
- kni_dev_remove(dev);
- }
- up_write(&knet->kni_list_lock);
-
- /* Clear the bit of device in use */
- clear_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use);
-
- put_net(net);
- pr_debug("/dev/kni closed\n");
-
- return 0;
-}
-
-static int
-kni_check_param(struct kni_dev *kni, struct rte_kni_device_info *dev)
-{
- if (!kni || !dev)
- return -1;
-
- /* Check if network name has been used */
- if (!strncmp(kni->name, dev->name, RTE_KNI_NAMESIZE)) {
- pr_err("KNI name %s duplicated\n", dev->name);
- return -1;
- }
-
- return 0;
-}
-
-static int
-kni_run_thread(struct kni_net *knet, struct kni_dev *kni, uint8_t force_bind)
-{
- /**
- * Create a new kernel thread for multiple mode, set its core affinity,
- * and finally wake it up.
- */
- if (multiple_kthread_on) {
- kni->pthread = kthread_create(kni_thread_multiple,
- (void *)kni, "kni_%s", kni->name);
- if (IS_ERR(kni->pthread)) {
- kni_dev_remove(kni);
- return -ECANCELED;
- }
-
- if (force_bind)
- kthread_bind(kni->pthread, kni->core_id);
- wake_up_process(kni->pthread);
- } else {
- mutex_lock(&knet->kni_kthread_lock);
-
- if (knet->kni_kthread == NULL) {
- knet->kni_kthread = kthread_create(kni_thread_single,
- (void *)knet, "kni_single");
- if (IS_ERR(knet->kni_kthread)) {
- mutex_unlock(&knet->kni_kthread_lock);
- kni_dev_remove(kni);
- return -ECANCELED;
- }
-
- if (force_bind)
- kthread_bind(knet->kni_kthread, kni->core_id);
- wake_up_process(knet->kni_kthread);
- }
-
- mutex_unlock(&knet->kni_kthread_lock);
- }
-
- return 0;
-}
-
-static int
-kni_ioctl_create(struct net *net, uint32_t ioctl_num,
- unsigned long ioctl_param)
-{
- struct kni_net *knet = net_generic(net, kni_net_id);
- int ret;
- struct rte_kni_device_info dev_info;
- struct net_device *net_dev = NULL;
- struct kni_dev *kni, *dev, *n;
-
- pr_info("Creating kni...\n");
- /* Check the buffer size, to avoid warning */
- if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
- return -EINVAL;
-
- /* Copy kni info from user space */
- if (copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info)))
- return -EFAULT;
-
- /* Check if name is zero-ended */
- if (strnlen(dev_info.name, sizeof(dev_info.name)) == sizeof(dev_info.name)) {
- pr_err("kni.name not zero-terminated");
- return -EINVAL;
- }
-
- /**
- * Check if the cpu core id is valid for binding.
- */
- if (dev_info.force_bind && !cpu_online(dev_info.core_id)) {
- pr_err("cpu %u is not online\n", dev_info.core_id);
- return -EINVAL;
- }
-
- /* Check if it has been created */
- down_read(&knet->kni_list_lock);
- list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {
- if (kni_check_param(dev, &dev_info) < 0) {
- up_read(&knet->kni_list_lock);
- return -EINVAL;
- }
- }
- up_read(&knet->kni_list_lock);
-
- net_dev = alloc_netdev(sizeof(struct kni_dev), dev_info.name,
-#ifdef NET_NAME_USER
- NET_NAME_USER,
-#endif
- kni_net_init);
- if (net_dev == NULL) {
- pr_err("error allocating device \"%s\"\n", dev_info.name);
- return -EBUSY;
- }
-
- dev_net_set(net_dev, net);
-
- kni = netdev_priv(net_dev);
-
- kni->net_dev = net_dev;
- kni->core_id = dev_info.core_id;
- strncpy(kni->name, dev_info.name, RTE_KNI_NAMESIZE);
-
- /* Translate user space info into kernel space info */
- if (dev_info.iova_mode) {
-#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
- kni->tx_q = iova_to_kva(current, dev_info.tx_phys);
- kni->rx_q = iova_to_kva(current, dev_info.rx_phys);
- kni->alloc_q = iova_to_kva(current, dev_info.alloc_phys);
- kni->free_q = iova_to_kva(current, dev_info.free_phys);
-
- kni->req_q = iova_to_kva(current, dev_info.req_phys);
- kni->resp_q = iova_to_kva(current, dev_info.resp_phys);
- kni->sync_va = dev_info.sync_va;
- kni->sync_kva = iova_to_kva(current, dev_info.sync_phys);
- kni->usr_tsk = current;
- kni->iova_mode = 1;
-#else
- pr_err("KNI module does not support IOVA to VA translation\n");
- return -EINVAL;
-#endif
- } else {
-
- kni->tx_q = phys_to_virt(dev_info.tx_phys);
- kni->rx_q = phys_to_virt(dev_info.rx_phys);
- kni->alloc_q = phys_to_virt(dev_info.alloc_phys);
- kni->free_q = phys_to_virt(dev_info.free_phys);
-
- kni->req_q = phys_to_virt(dev_info.req_phys);
- kni->resp_q = phys_to_virt(dev_info.resp_phys);
- kni->sync_va = dev_info.sync_va;
- kni->sync_kva = phys_to_virt(dev_info.sync_phys);
- kni->iova_mode = 0;
- }
-
- kni->mbuf_size = dev_info.mbuf_size;
-
- pr_debug("tx_phys: 0x%016llx, tx_q addr: 0x%p\n",
- (unsigned long long) dev_info.tx_phys, kni->tx_q);
- pr_debug("rx_phys: 0x%016llx, rx_q addr: 0x%p\n",
- (unsigned long long) dev_info.rx_phys, kni->rx_q);
- pr_debug("alloc_phys: 0x%016llx, alloc_q addr: 0x%p\n",
- (unsigned long long) dev_info.alloc_phys, kni->alloc_q);
- pr_debug("free_phys: 0x%016llx, free_q addr: 0x%p\n",
- (unsigned long long) dev_info.free_phys, kni->free_q);
- pr_debug("req_phys: 0x%016llx, req_q addr: 0x%p\n",
- (unsigned long long) dev_info.req_phys, kni->req_q);
- pr_debug("resp_phys: 0x%016llx, resp_q addr: 0x%p\n",
- (unsigned long long) dev_info.resp_phys, kni->resp_q);
- pr_debug("mbuf_size: %u\n", kni->mbuf_size);
-
- /* if user has provided a valid mac address */
- if (is_valid_ether_addr(dev_info.mac_addr)) {
-#ifdef HAVE_ETH_HW_ADDR_SET
- eth_hw_addr_set(net_dev, dev_info.mac_addr);
-#else
- memcpy(net_dev->dev_addr, dev_info.mac_addr, ETH_ALEN);
-#endif
- } else {
- /* Assign random MAC address. */
- eth_hw_addr_random(net_dev);
- }
-
- if (dev_info.mtu)
- net_dev->mtu = dev_info.mtu;
-#ifdef HAVE_MAX_MTU_PARAM
- net_dev->max_mtu = net_dev->mtu;
-
- if (dev_info.min_mtu)
- net_dev->min_mtu = dev_info.min_mtu;
-
- if (dev_info.max_mtu)
- net_dev->max_mtu = dev_info.max_mtu;
-#endif
-
- ret = register_netdev(net_dev);
- if (ret) {
- pr_err("error %i registering device \"%s\"\n",
- ret, dev_info.name);
- kni->net_dev = NULL;
- kni_dev_remove(kni);
- free_netdev(net_dev);
- return -ENODEV;
- }
-
- netif_carrier_off(net_dev);
-
- ret = kni_run_thread(knet, kni, dev_info.force_bind);
- if (ret != 0)
- return ret;
-
- down_write(&knet->kni_list_lock);
- list_add(&kni->list, &knet->kni_list_head);
- up_write(&knet->kni_list_lock);
-
- return 0;
-}
-
-static int
-kni_ioctl_release(struct net *net, uint32_t ioctl_num,
- unsigned long ioctl_param)
-{
- struct kni_net *knet = net_generic(net, kni_net_id);
- int ret = -EINVAL;
- struct kni_dev *dev, *n;
- struct rte_kni_device_info dev_info;
-
- if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
- return -EINVAL;
-
- if (copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info)))
- return -EFAULT;
-
- /* Release the network device according to its name */
- if (strlen(dev_info.name) == 0)
- return -EINVAL;
-
- down_write(&knet->kni_list_lock);
- list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {
- if (strncmp(dev->name, dev_info.name, RTE_KNI_NAMESIZE) != 0)
- continue;
-
- if (multiple_kthread_on && dev->pthread != NULL) {
- kthread_stop(dev->pthread);
- dev->pthread = NULL;
- }
-
- list_del(&dev->list);
- kni_dev_remove(dev);
- ret = 0;
- break;
- }
- up_write(&knet->kni_list_lock);
- pr_info("%s release kni named %s\n",
- (ret == 0 ? "Successfully" : "Unsuccessfully"), dev_info.name);
-
- return ret;
-}
-
-static long
-kni_ioctl(struct file *file, unsigned int ioctl_num, unsigned long ioctl_param)
-{
- long ret = -EINVAL;
- struct net *net = current->nsproxy->net_ns;
-
- pr_debug("IOCTL num=0x%0x param=0x%0lx\n", ioctl_num, ioctl_param);
-
- /*
- * Switch according to the ioctl called
- */
- switch (_IOC_NR(ioctl_num)) {
- case _IOC_NR(RTE_KNI_IOCTL_TEST):
- /* For test only, not used */
- break;
- case _IOC_NR(RTE_KNI_IOCTL_CREATE):
- ret = kni_ioctl_create(net, ioctl_num, ioctl_param);
- break;
- case _IOC_NR(RTE_KNI_IOCTL_RELEASE):
- ret = kni_ioctl_release(net, ioctl_num, ioctl_param);
- break;
- default:
- pr_debug("IOCTL default\n");
- break;
- }
-
- return ret;
-}
-
-static long
-kni_compat_ioctl(struct file *file, unsigned int ioctl_num,
- unsigned long ioctl_param)
-{
- /* 32 bits app on 64 bits OS to be supported later */
- pr_debug("Not implemented.\n");
-
- return -EINVAL;
-}
-
-static const struct file_operations kni_fops = {
- .owner = THIS_MODULE,
- .open = kni_open,
- .release = kni_release,
- .unlocked_ioctl = kni_ioctl,
- .compat_ioctl = kni_compat_ioctl,
-};
-
-static struct miscdevice kni_misc = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = KNI_DEVICE,
- .fops = &kni_fops,
-};
-
-static int __init
-kni_parse_kthread_mode(void)
-{
- if (!kthread_mode)
- return 0;
-
- if (strcmp(kthread_mode, "single") == 0)
- return 0;
- else if (strcmp(kthread_mode, "multiple") == 0)
- multiple_kthread_on = 1;
- else
- return -1;
-
- return 0;
-}
-
-static int __init
-kni_parse_carrier_state(void)
-{
- if (!carrier) {
- kni_dflt_carrier = 0;
- return 0;
- }
-
- if (strcmp(carrier, "off") == 0)
- kni_dflt_carrier = 0;
- else if (strcmp(carrier, "on") == 0)
- kni_dflt_carrier = 1;
- else
- return -1;
-
- return 0;
-}
-
-static int __init
-kni_parse_bifurcated_support(void)
-{
- if (!enable_bifurcated) {
- bifurcated_support = 0;
- return 0;
- }
-
- if (strcmp(enable_bifurcated, "on") == 0)
- bifurcated_support = 1;
- else
- return -1;
-
- return 0;
-}
-
-static int __init
-kni_init(void)
-{
- int rc;
-
- if (kni_parse_kthread_mode() < 0) {
- pr_err("Invalid parameter for kthread_mode\n");
- return -EINVAL;
- }
-
- if (multiple_kthread_on == 0)
- pr_debug("Single kernel thread for all KNI devices\n");
- else
- pr_debug("Multiple kernel thread mode enabled\n");
-
- if (kni_parse_carrier_state() < 0) {
- pr_err("Invalid parameter for carrier\n");
- return -EINVAL;
- }
-
- if (kni_dflt_carrier == 0)
- pr_debug("Default carrier state set to off.\n");
- else
- pr_debug("Default carrier state set to on.\n");
-
- if (kni_parse_bifurcated_support() < 0) {
- pr_err("Invalid parameter for bifurcated support\n");
- return -EINVAL;
- }
- if (bifurcated_support == 1)
- pr_debug("bifurcated support is enabled.\n");
-
- if (min_scheduling_interval < 0 || max_scheduling_interval < 0 ||
- min_scheduling_interval > KNI_KTHREAD_MAX_RESCHEDULE_INTERVAL ||
- max_scheduling_interval > KNI_KTHREAD_MAX_RESCHEDULE_INTERVAL ||
- min_scheduling_interval >= max_scheduling_interval) {
- pr_err("Invalid parameters for scheduling interval\n");
- return -EINVAL;
- }
-
-#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
- rc = register_pernet_subsys(&kni_net_ops);
-#else
- rc = register_pernet_gen_subsys(&kni_net_id, &kni_net_ops);
-#endif
- if (rc)
- return -EPERM;
-
- rc = misc_register(&kni_misc);
- if (rc != 0) {
- pr_err("Misc registration failed\n");
- goto out;
- }
-
- /* Configure the lo mode according to the input parameter */
- kni_net_config_lo_mode(lo_mode);
-
- return 0;
-
-out:
-#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
- unregister_pernet_subsys(&kni_net_ops);
-#else
- unregister_pernet_gen_subsys(kni_net_id, &kni_net_ops);
-#endif
- return rc;
-}
-
-static void __exit
-kni_exit(void)
-{
- misc_deregister(&kni_misc);
-#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
- unregister_pernet_subsys(&kni_net_ops);
-#else
- unregister_pernet_gen_subsys(kni_net_id, &kni_net_ops);
-#endif
-}
-
-module_init(kni_init);
-module_exit(kni_exit);
-
-module_param(lo_mode, charp, 0644);
-MODULE_PARM_DESC(lo_mode,
-"KNI loopback mode (default=lo_mode_none):\n"
-"\t\tlo_mode_none Kernel loopback disabled\n"
-"\t\tlo_mode_fifo Enable kernel loopback with fifo\n"
-"\t\tlo_mode_fifo_skb Enable kernel loopback with fifo and skb buffer\n"
-"\t\t"
-);
-
-module_param(kthread_mode, charp, 0644);
-MODULE_PARM_DESC(kthread_mode,
-"Kernel thread mode (default=single):\n"
-"\t\tsingle Single kernel thread mode enabled.\n"
-"\t\tmultiple Multiple kernel thread mode enabled.\n"
-"\t\t"
-);
-
-module_param(carrier, charp, 0644);
-MODULE_PARM_DESC(carrier,
-"Default carrier state for KNI interface (default=off):\n"
-"\t\toff Interfaces will be created with carrier state set to off.\n"
-"\t\ton Interfaces will be created with carrier state set to on.\n"
-"\t\t"
-);
-
-module_param(enable_bifurcated, charp, 0644);
-MODULE_PARM_DESC(enable_bifurcated,
-"Enable request processing support for bifurcated drivers, "
-"which means releasing rtnl_lock before calling userspace callback and "
-"supporting async requests (default=off):\n"
-"\t\ton Enable request processing support for bifurcated drivers.\n"
-"\t\t"
-);
-
-module_param(min_scheduling_interval, long, 0644);
-MODULE_PARM_DESC(min_scheduling_interval,
-"KNI thread min scheduling interval (default=100 microseconds)"
-);
-
-module_param(max_scheduling_interval, long, 0644);
-MODULE_PARM_DESC(max_scheduling_interval,
-"KNI thread max scheduling interval (default=200 microseconds)"
-);
diff --git a/kernel/linux/kni/kni_net.c b/kernel/linux/kni/kni_net.c
deleted file mode 100644
index 779ee3451a4c..000000000000
--- a/kernel/linux/kni/kni_net.c
+++ /dev/null
@@ -1,878 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright(c) 2010-2014 Intel Corporation.
- */
-
-/*
- * This code is inspired from the book "Linux Device Drivers" by
- * Alessandro Rubini and Jonathan Corbet, published by O'Reilly & Associates
- */
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h> /* eth_type_trans */
-#include <linux/ethtool.h>
-#include <linux/skbuff.h>
-#include <linux/kthread.h>
-#include <linux/delay.h>
-#include <linux/rtnetlink.h>
-
-#include <rte_kni_common.h>
-#include <kni_fifo.h>
-
-#include "compat.h"
-#include "kni_dev.h"
-
-#define WD_TIMEOUT 5 /*jiffies */
-
-#define KNI_WAIT_RESPONSE_TIMEOUT 300 /* 3 seconds */
-
-/* typedef for rx function */
-typedef void (*kni_net_rx_t)(struct kni_dev *kni);
-
-static void kni_net_rx_normal(struct kni_dev *kni);
-
-/* kni rx function pointer, with default to normal rx */
-static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;
-
-#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
-/* iova to kernel virtual address */
-static inline void *
-iova2kva(struct kni_dev *kni, void *iova)
-{
- return phys_to_virt(iova_to_phys(kni->usr_tsk, (unsigned long)iova));
-}
-
-static inline void *
-iova2data_kva(struct kni_dev *kni, struct rte_kni_mbuf *m)
-{
- return phys_to_virt(iova_to_phys(kni->usr_tsk, m->buf_iova) +
- m->data_off);
-}
-#endif
-
-/* physical address to kernel virtual address */
-static void *
-pa2kva(void *pa)
-{
- return phys_to_virt((unsigned long)pa);
-}
-
-/* physical address to virtual address */
-static void *
-pa2va(void *pa, struct rte_kni_mbuf *m)
-{
- void *va;
-
- va = (void *)((unsigned long)pa +
- (unsigned long)m->buf_addr -
- (unsigned long)m->buf_iova);
- return va;
-}
-
-/* mbuf data kernel virtual address from mbuf kernel virtual address */
-static void *
-kva2data_kva(struct rte_kni_mbuf *m)
-{
- return phys_to_virt(m->buf_iova + m->data_off);
-}
-
-static inline void *
-get_kva(struct kni_dev *kni, void *pa)
-{
-#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
- if (kni->iova_mode == 1)
- return iova2kva(kni, pa);
-#endif
- return pa2kva(pa);
-}
-
-static inline void *
-get_data_kva(struct kni_dev *kni, void *pkt_kva)
-{
-#ifdef HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
- if (kni->iova_mode == 1)
- return iova2data_kva(kni, pkt_kva);
-#endif
- return kva2data_kva(pkt_kva);
-}
-
-/*
- * It can be called to process the request.
- */
-static int
-kni_net_process_request(struct net_device *dev, struct rte_kni_request *req)
-{
- struct kni_dev *kni = netdev_priv(dev);
- int ret = -1;
- void *resp_va;
- uint32_t num;
- int ret_val;
-
- ASSERT_RTNL();
-
- if (bifurcated_support) {
- /* If we need to wait and RTNL mutex is held
- * drop the mutex and hold reference to keep device
- */
- if (req->async == 0) {
- dev_hold(dev);
- rtnl_unlock();
- }
- }
-
- mutex_lock(&kni->sync_lock);
-
- /* Construct data */
- memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
- num = kni_fifo_put(kni->req_q, &kni->sync_va, 1);
- if (num < 1) {
- pr_err("Cannot send to req_q\n");
- ret = -EBUSY;
- goto fail;
- }
-
- if (bifurcated_support) {
- /* No result available since request is handled
- * asynchronously. set response to success.
- */
- if (req->async != 0) {
- req->result = 0;
- goto async;
- }
- }
-
- ret_val = wait_event_interruptible_timeout(kni->wq,
- kni_fifo_count(kni->resp_q), 3 * HZ);
- if (signal_pending(current) || ret_val <= 0) {
- ret = -ETIME;
- goto fail;
- }
- num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1);
- if (num != 1 || resp_va != kni->sync_va) {
- /* This should never happen */
- pr_err("No data in resp_q\n");
- ret = -ENODATA;
- goto fail;
- }
-
- memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
-async:
- ret = 0;
-
-fail:
- mutex_unlock(&kni->sync_lock);
- if (bifurcated_support) {
- if (req->async == 0) {
- rtnl_lock();
- dev_put(dev);
- }
- }
- return ret;
-}
-
-/*
- * Open and close
- */
-static int
-kni_net_open(struct net_device *dev)
-{
- int ret;
- struct rte_kni_request req;
-
- netif_start_queue(dev);
- if (kni_dflt_carrier == 1)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
-
- memset(&req, 0, sizeof(req));
- req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF;
-
- /* Setting if_up to non-zero means up */
- req.if_up = 1;
- ret = kni_net_process_request(dev, &req);
-
- return (ret == 0) ? req.result : ret;
-}
-
-static int
-kni_net_release(struct net_device *dev)
-{
- int ret;
- struct rte_kni_request req;
-
- netif_stop_queue(dev); /* can't transmit any more */
- netif_carrier_off(dev);
-
- memset(&req, 0, sizeof(req));
- req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF;
-
- /* Setting if_up to 0 means down */
- req.if_up = 0;
-
- if (bifurcated_support) {
- /* request async because of the deadlock problem */
- req.async = 1;
- }
-
- ret = kni_net_process_request(dev, &req);
-
- return (ret == 0) ? req.result : ret;
-}
-
-static void
-kni_fifo_trans_pa2va(struct kni_dev *kni,
- struct rte_kni_fifo *src_pa, struct rte_kni_fifo *dst_va)
-{
- uint32_t ret, i, num_dst, num_rx;
- struct rte_kni_mbuf *kva, *prev_kva;
- int nb_segs;
- int kva_nb_segs;
-
- do {
- num_dst = kni_fifo_free_count(dst_va);
- if (num_dst == 0)
- return;
-
- num_rx = min_t(uint32_t, num_dst, MBUF_BURST_SZ);
-
- num_rx = kni_fifo_get(src_pa, kni->pa, num_rx);
- if (num_rx == 0)
- return;
-
- for (i = 0; i < num_rx; i++) {
- kva = get_kva(kni, kni->pa[i]);
- kni->va[i] = pa2va(kni->pa[i], kva);
-
- kva_nb_segs = kva->nb_segs;
- for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
- if (!kva->next)
- break;
-
- prev_kva = kva;
- kva = get_kva(kni, kva->next);
- /* Convert physical address to virtual address */
- prev_kva->next = pa2va(prev_kva->next, kva);
- }
- }
-
- ret = kni_fifo_put(dst_va, kni->va, num_rx);
- if (ret != num_rx) {
- /* Failing should not happen */
- pr_err("Fail to enqueue entries into dst_va\n");
- return;
- }
- } while (1);
-}
-
-/* Try to release mbufs when kni release */
-void kni_net_release_fifo_phy(struct kni_dev *kni)
-{
- /* release rx_q first, because it can't release in userspace */
- kni_fifo_trans_pa2va(kni, kni->rx_q, kni->free_q);
- /* release alloc_q for speeding up kni release in userspace */
- kni_fifo_trans_pa2va(kni, kni->alloc_q, kni->free_q);
-}
-
-/*
- * Configuration changes (passed on by ifconfig)
- */
-static int
-kni_net_config(struct net_device *dev, struct ifmap *map)
-{
- if (dev->flags & IFF_UP) /* can't act on a running interface */
- return -EBUSY;
-
- /* ignore other fields */
- return 0;
-}
-
-/*
- * Transmit a packet (called by the kernel)
- */
-static int
-kni_net_tx(struct sk_buff *skb, struct net_device *dev)
-{
- int len = 0;
- uint32_t ret;
- struct kni_dev *kni = netdev_priv(dev);
- struct rte_kni_mbuf *pkt_kva = NULL;
- void *pkt_pa = NULL;
- void *pkt_va = NULL;
-
- /* save the timestamp */
-#ifdef HAVE_TRANS_START_HELPER
- netif_trans_update(dev);
-#else
- dev->trans_start = jiffies;
-#endif
-
- /* Check if the length of skb is less than mbuf size */
- if (skb->len > kni->mbuf_size)
- goto drop;
-
- /**
- * Check if it has at least one free entry in tx_q and
- * one entry in alloc_q.
- */
- if (kni_fifo_free_count(kni->tx_q) == 0 ||
- kni_fifo_count(kni->alloc_q) == 0) {
- /**
- * If no free entry in tx_q or no entry in alloc_q,
- * drops skb and goes out.
- */
- goto drop;
- }
-
- /* dequeue a mbuf from alloc_q */
- ret = kni_fifo_get(kni->alloc_q, &pkt_pa, 1);
- if (likely(ret == 1)) {
- void *data_kva;
-
- pkt_kva = get_kva(kni, pkt_pa);
- data_kva = get_data_kva(kni, pkt_kva);
- pkt_va = pa2va(pkt_pa, pkt_kva);
-
- len = skb->len;
- memcpy(data_kva, skb->data, len);
- if (unlikely(len < ETH_ZLEN)) {
- memset(data_kva + len, 0, ETH_ZLEN - len);
- len = ETH_ZLEN;
- }
- pkt_kva->pkt_len = len;
- pkt_kva->data_len = len;
-
- /* enqueue mbuf into tx_q */
- ret = kni_fifo_put(kni->tx_q, &pkt_va, 1);
- if (unlikely(ret != 1)) {
- /* Failing should not happen */
- pr_err("Fail to enqueue mbuf into tx_q\n");
- goto drop;
- }
- } else {
- /* Failing should not happen */
- pr_err("Fail to dequeue mbuf from alloc_q\n");
- goto drop;
- }
-
- /* Free skb and update statistics */
- dev_kfree_skb(skb);
- dev->stats.tx_bytes += len;
- dev->stats.tx_packets++;
-
- return NETDEV_TX_OK;
-
-drop:
- /* Free skb and update statistics */
- dev_kfree_skb(skb);
- dev->stats.tx_dropped++;
-
- return NETDEV_TX_OK;
-}
-
-/*
- * RX: normal working mode
- */
-static void
-kni_net_rx_normal(struct kni_dev *kni)
-{
- uint32_t ret;
- uint32_t len;
- uint32_t i, num_rx, num_fq;
- struct rte_kni_mbuf *kva, *prev_kva;
- void *data_kva;
- struct sk_buff *skb;
- struct net_device *dev = kni->net_dev;
-
- /* Get the number of free entries in free_q */
- num_fq = kni_fifo_free_count(kni->free_q);
- if (num_fq == 0) {
- /* No room on the free_q, bail out */
- return;
- }
-
- /* Calculate the number of entries to dequeue from rx_q */
- num_rx = min_t(uint32_t, num_fq, MBUF_BURST_SZ);
-
- /* Burst dequeue from rx_q */
- num_rx = kni_fifo_get(kni->rx_q, kni->pa, num_rx);
- if (num_rx == 0)
- return;
-
- /* Transfer received packets to netif */
- for (i = 0; i < num_rx; i++) {
- kva = get_kva(kni, kni->pa[i]);
- len = kva->pkt_len;
- data_kva = get_data_kva(kni, kva);
- kni->va[i] = pa2va(kni->pa[i], kva);
-
- skb = netdev_alloc_skb(dev, len);
- if (!skb) {
- /* Update statistics */
- dev->stats.rx_dropped++;
- continue;
- }
-
- if (kva->nb_segs == 1) {
- memcpy(skb_put(skb, len), data_kva, len);
- } else {
- int nb_segs;
- int kva_nb_segs = kva->nb_segs;
-
- for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
- memcpy(skb_put(skb, kva->data_len),
- data_kva, kva->data_len);
-
- if (!kva->next)
- break;
-
- prev_kva = kva;
- kva = get_kva(kni, kva->next);
- data_kva = kva2data_kva(kva);
- /* Convert physical address to virtual address */
- prev_kva->next = pa2va(prev_kva->next, kva);
- }
- }
-
- skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- /* Call netif interface */
-#ifdef HAVE_NETIF_RX_NI
- netif_rx_ni(skb);
-#else
- netif_rx(skb);
-#endif
-
- /* Update statistics */
- dev->stats.rx_bytes += len;
- dev->stats.rx_packets++;
- }
-
- /* Burst enqueue mbufs into free_q */
- ret = kni_fifo_put(kni->free_q, kni->va, num_rx);
- if (ret != num_rx)
- /* Failing should not happen */
- pr_err("Fail to enqueue entries into free_q\n");
-}
-
-/*
- * RX: loopback with enqueue/dequeue fifos.
- */
-static void
-kni_net_rx_lo_fifo(struct kni_dev *kni)
-{
- uint32_t ret;
- uint32_t len;
- uint32_t i, num, num_rq, num_tq, num_aq, num_fq;
- struct rte_kni_mbuf *kva, *next_kva;
- void *data_kva;
- struct rte_kni_mbuf *alloc_kva;
- void *alloc_data_kva;
- struct net_device *dev = kni->net_dev;
-
- /* Get the number of entries in rx_q */
- num_rq = kni_fifo_count(kni->rx_q);
-
- /* Get the number of free entries in tx_q */
- num_tq = kni_fifo_free_count(kni->tx_q);
-
- /* Get the number of entries in alloc_q */
- num_aq = kni_fifo_count(kni->alloc_q);
-
- /* Get the number of free entries in free_q */
- num_fq = kni_fifo_free_count(kni->free_q);
-
- /* Calculate the number of entries to be dequeued from rx_q */
- num = min(num_rq, num_tq);
- num = min(num, num_aq);
- num = min(num, num_fq);
- num = min_t(uint32_t, num, MBUF_BURST_SZ);
-
- /* Return if no entry to dequeue from rx_q */
- if (num == 0)
- return;
-
- /* Burst dequeue from rx_q */
- ret = kni_fifo_get(kni->rx_q, kni->pa, num);
- if (ret == 0)
- return; /* Failing should not happen */
-
- /* Dequeue entries from alloc_q */
- ret = kni_fifo_get(kni->alloc_q, kni->alloc_pa, num);
- if (ret) {
- num = ret;
- /* Copy mbufs */
- for (i = 0; i < num; i++) {
- kva = get_kva(kni, kni->pa[i]);
- len = kva->data_len;
- data_kva = get_data_kva(kni, kva);
- kni->va[i] = pa2va(kni->pa[i], kva);
-
- while (kva->next) {
- next_kva = get_kva(kni, kva->next);
- /* Convert physical address to virtual address */
- kva->next = pa2va(kva->next, next_kva);
- kva = next_kva;
- }
-
- alloc_kva = get_kva(kni, kni->alloc_pa[i]);
- alloc_data_kva = get_data_kva(kni, alloc_kva);
- kni->alloc_va[i] = pa2va(kni->alloc_pa[i], alloc_kva);
-
- memcpy(alloc_data_kva, data_kva, len);
- alloc_kva->pkt_len = len;
- alloc_kva->data_len = len;
-
- dev->stats.tx_bytes += len;
- dev->stats.rx_bytes += len;
- }
-
- /* Burst enqueue mbufs into tx_q */
- ret = kni_fifo_put(kni->tx_q, kni->alloc_va, num);
- if (ret != num)
- /* Failing should not happen */
- pr_err("Fail to enqueue mbufs into tx_q\n");
- }
-
- /* Burst enqueue mbufs into free_q */
- ret = kni_fifo_put(kni->free_q, kni->va, num);
- if (ret != num)
- /* Failing should not happen */
- pr_err("Fail to enqueue mbufs into free_q\n");
-
- /**
- * Update statistic, and enqueue/dequeue failure is impossible,
- * as all queues are checked at first.
- */
- dev->stats.tx_packets += num;
- dev->stats.rx_packets += num;
-}
-
-/*
- * RX: loopback with enqueue/dequeue fifos and sk buffer copies.
- */
-static void
-kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
-{
- uint32_t ret;
- uint32_t len;
- uint32_t i, num_rq, num_fq, num;
- struct rte_kni_mbuf *kva, *prev_kva;
- void *data_kva;
- struct sk_buff *skb;
- struct net_device *dev = kni->net_dev;
-
- /* Get the number of entries in rx_q */
- num_rq = kni_fifo_count(kni->rx_q);
-
- /* Get the number of free entries in free_q */
- num_fq = kni_fifo_free_count(kni->free_q);
-
- /* Calculate the number of entries to dequeue from rx_q */
- num = min(num_rq, num_fq);
- num = min_t(uint32_t, num, MBUF_BURST_SZ);
-
- /* Return if no entry to dequeue from rx_q */
- if (num == 0)
- return;
-
- /* Burst dequeue mbufs from rx_q */
- ret = kni_fifo_get(kni->rx_q, kni->pa, num);
- if (ret == 0)
- return;
-
- /* Copy mbufs to sk buffer and then call tx interface */
- for (i = 0; i < num; i++) {
- kva = get_kva(kni, kni->pa[i]);
- len = kva->pkt_len;
- data_kva = get_data_kva(kni, kva);
- kni->va[i] = pa2va(kni->pa[i], kva);
-
- skb = netdev_alloc_skb(dev, len);
- if (skb) {
- memcpy(skb_put(skb, len), data_kva, len);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- dev_kfree_skb(skb);
- }
-
- /* Simulate real usage, allocate/copy skb twice */
- skb = netdev_alloc_skb(dev, len);
- if (skb == NULL) {
- dev->stats.rx_dropped++;
- continue;
- }
-
- if (kva->nb_segs == 1) {
- memcpy(skb_put(skb, len), data_kva, len);
- } else {
- int nb_segs;
- int kva_nb_segs = kva->nb_segs;
-
- for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
- memcpy(skb_put(skb, kva->data_len),
- data_kva, kva->data_len);
-
- if (!kva->next)
- break;
-
- prev_kva = kva;
- kva = get_kva(kni, kva->next);
- data_kva = get_data_kva(kni, kva);
- /* Convert physical address to virtual address */
- prev_kva->next = pa2va(prev_kva->next, kva);
- }
- }
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- dev->stats.rx_bytes += len;
- dev->stats.rx_packets++;
-
- /* call tx interface */
- kni_net_tx(skb, dev);
- }
-
- /* enqueue all the mbufs from rx_q into free_q */
- ret = kni_fifo_put(kni->free_q, kni->va, num);
- if (ret != num)
- /* Failing should not happen */
- pr_err("Fail to enqueue mbufs into free_q\n");
-}
-
-/* rx interface */
-void
-kni_net_rx(struct kni_dev *kni)
-{
- /**
- * It doesn't need to check if it is NULL pointer,
- * as it has a default value
- */
- (*kni_net_rx_func)(kni);
-}
-
-/*
- * Deal with a transmit timeout.
- */
-#ifdef HAVE_TX_TIMEOUT_TXQUEUE
-static void
-kni_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
-#else
-static void
-kni_net_tx_timeout(struct net_device *dev)
-#endif
-{
- pr_debug("Transmit timeout at %ld, latency %ld\n", jiffies,
- jiffies - dev_trans_start(dev));
-
- dev->stats.tx_errors++;
- netif_wake_queue(dev);
-}
-
-static int
-kni_net_change_mtu(struct net_device *dev, int new_mtu)
-{
- int ret;
- struct rte_kni_request req;
-
- pr_debug("kni_net_change_mtu new mtu %d to be set\n", new_mtu);
-
- memset(&req, 0, sizeof(req));
- req.req_id = RTE_KNI_REQ_CHANGE_MTU;
- req.new_mtu = new_mtu;
- ret = kni_net_process_request(dev, &req);
- if (ret == 0 && req.result == 0)
- dev->mtu = new_mtu;
-
- return (ret == 0) ? req.result : ret;
-}
-
-static void
-kni_net_change_rx_flags(struct net_device *netdev, int flags)
-{
- struct rte_kni_request req;
-
- memset(&req, 0, sizeof(req));
-
- if (flags & IFF_ALLMULTI) {
- req.req_id = RTE_KNI_REQ_CHANGE_ALLMULTI;
-
- if (netdev->flags & IFF_ALLMULTI)
- req.allmulti = 1;
- else
- req.allmulti = 0;
- }
-
- if (flags & IFF_PROMISC) {
- req.req_id = RTE_KNI_REQ_CHANGE_PROMISC;
-
- if (netdev->flags & IFF_PROMISC)
- req.promiscusity = 1;
- else
- req.promiscusity = 0;
- }
-
- kni_net_process_request(netdev, &req);
-}
-
-/*
- * Checks if the user space application provided the resp message
- */
-void
-kni_net_poll_resp(struct kni_dev *kni)
-{
- if (kni_fifo_count(kni->resp_q))
- wake_up_interruptible(&kni->wq);
-}
-
-/*
- * Fill the eth header
- */
-static int
-kni_net_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, const void *daddr,
- const void *saddr, uint32_t len)
-{
- struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
-
- memcpy(eth->h_source, saddr ? saddr : dev->dev_addr, dev->addr_len);
- memcpy(eth->h_dest, daddr ? daddr : dev->dev_addr, dev->addr_len);
- eth->h_proto = htons(type);
-
- return dev->hard_header_len;
-}
-
-/*
- * Re-fill the eth header
- */
-#ifdef HAVE_REBUILD_HEADER
-static int
-kni_net_rebuild_header(struct sk_buff *skb)
-{
- struct net_device *dev = skb->dev;
- struct ethhdr *eth = (struct ethhdr *) skb->data;
-
- memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
- memcpy(eth->h_dest, dev->dev_addr, dev->addr_len);
-
- return 0;
-}
-#endif /* < 4.1.0 */
-
-/**
- * kni_net_set_mac - Change the Ethernet Address of the KNI NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- **/
-static int
-kni_net_set_mac(struct net_device *netdev, void *p)
-{
- int ret;
- struct rte_kni_request req;
- struct sockaddr *addr = p;
-
- memset(&req, 0, sizeof(req));
- req.req_id = RTE_KNI_REQ_CHANGE_MAC_ADDR;
-
- if (!is_valid_ether_addr((unsigned char *)(addr->sa_data)))
- return -EADDRNOTAVAIL;
-
- memcpy(req.mac_addr, addr->sa_data, netdev->addr_len);
-#ifdef HAVE_ETH_HW_ADDR_SET
- eth_hw_addr_set(netdev, addr->sa_data);
-#else
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-#endif
-
- ret = kni_net_process_request(netdev, &req);
-
- return (ret == 0 ? req.result : ret);
-}
-
-#ifdef HAVE_CHANGE_CARRIER_CB
-static int
-kni_net_change_carrier(struct net_device *dev, bool new_carrier)
-{
- if (new_carrier)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
- return 0;
-}
-#endif
-
-static const struct header_ops kni_net_header_ops = {
- .create = kni_net_header,
- .parse = eth_header_parse,
-#ifdef HAVE_REBUILD_HEADER
- .rebuild = kni_net_rebuild_header,
-#endif /* < 4.1.0 */
- .cache = NULL, /* disable caching */
-};
-
-static const struct net_device_ops kni_net_netdev_ops = {
- .ndo_open = kni_net_open,
- .ndo_stop = kni_net_release,
- .ndo_set_config = kni_net_config,
- .ndo_change_rx_flags = kni_net_change_rx_flags,
- .ndo_start_xmit = kni_net_tx,
- .ndo_change_mtu = kni_net_change_mtu,
- .ndo_tx_timeout = kni_net_tx_timeout,
- .ndo_set_mac_address = kni_net_set_mac,
-#ifdef HAVE_CHANGE_CARRIER_CB
- .ndo_change_carrier = kni_net_change_carrier,
-#endif
-};
-
-static void kni_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strlcpy(info->version, KNI_VERSION, sizeof(info->version));
- strlcpy(info->driver, "kni", sizeof(info->driver));
-}
-
-static const struct ethtool_ops kni_net_ethtool_ops = {
- .get_drvinfo = kni_get_drvinfo,
- .get_link = ethtool_op_get_link,
-};
-
-void
-kni_net_init(struct net_device *dev)
-{
- struct kni_dev *kni = netdev_priv(dev);
-
- init_waitqueue_head(&kni->wq);
- mutex_init(&kni->sync_lock);
-
- ether_setup(dev); /* assign some of the fields */
- dev->netdev_ops = &kni_net_netdev_ops;
- dev->header_ops = &kni_net_header_ops;
- dev->ethtool_ops = &kni_net_ethtool_ops;
- dev->watchdog_timeo = WD_TIMEOUT;
-}
-
-void
-kni_net_config_lo_mode(char *lo_str)
-{
- if (!lo_str) {
- pr_debug("loopback disabled");
- return;
- }
-
- if (!strcmp(lo_str, "lo_mode_none"))
- pr_debug("loopback disabled");
- else if (!strcmp(lo_str, "lo_mode_fifo")) {
- pr_debug("loopback mode=lo_mode_fifo enabled");
- kni_net_rx_func = kni_net_rx_lo_fifo;
- } else if (!strcmp(lo_str, "lo_mode_fifo_skb")) {
- pr_debug("loopback mode=lo_mode_fifo_skb enabled");
- kni_net_rx_func = kni_net_rx_lo_fifo_skb;
- } else {
- pr_debug("Unknown loopback parameter, disabled");
- }
-}
diff --git a/kernel/linux/kni/meson.build b/kernel/linux/kni/meson.build
deleted file mode 100644
index 4c90069e9989..000000000000
--- a/kernel/linux/kni/meson.build
+++ /dev/null
@@ -1,41 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
-
-# For SUSE build check function arguments of ndo_tx_timeout API
-# Ref: https://jira.devtools.intel.com/browse/DPDK-29263
-kmod_cflags = ''
-file_path = kernel_source_dir + '/include/linux/netdevice.h'
-run_cmd = run_command('grep', 'ndo_tx_timeout', file_path, check: false)
-
-if run_cmd.stdout().contains('txqueue') == true
- kmod_cflags = '-DHAVE_ARG_TX_QUEUE'
-endif
-
-
-kni_mkfile = custom_target('rte_kni_makefile',
- output: 'Makefile',
- command: ['touch', '@OUTPUT@'])
-
-kni_sources = files(
- 'kni_misc.c',
- 'kni_net.c',
- 'Kbuild',
-)
-
-custom_target('rte_kni',
- input: kni_sources,
- output: 'rte_kni.ko',
- command: ['make', '-j4', '-C', kernel_build_dir,
- 'M=' + meson.current_build_dir(),
- 'src=' + meson.current_source_dir(),
- ' '.join(['MODULE_CFLAGS=', kmod_cflags,'-include '])
- + dpdk_source_root + '/config/rte_config.h' +
- ' -I' + dpdk_source_root + '/lib/eal/include' +
- ' -I' + dpdk_source_root + '/lib/kni' +
- ' -I' + dpdk_build_root +
- ' -I' + meson.current_source_dir(),
- 'modules'] + cross_args,
- depends: kni_mkfile,
- install: install,
- install_dir: kernel_install_dir,
- build_by_default: get_option('enable_kmods'))
diff --git a/kernel/linux/meson.build b/kernel/linux/meson.build
index 16a094899446..8d47074621f7 100644
--- a/kernel/linux/meson.build
+++ b/kernel/linux/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Intel Corporation
-subdirs = ['kni']
+subdirs = []
kernel_build_dir = get_option('kernel_dir')
kernel_source_dir = get_option('kernel_dir')
diff --git a/lib/eal/common/eal_common_log.c b/lib/eal/common/eal_common_log.c
index bd7b188ceb4a..0a1d219d6924 100644
--- a/lib/eal/common/eal_common_log.c
+++ b/lib/eal/common/eal_common_log.c
@@ -356,7 +356,6 @@ static const struct logtype logtype_strings[] = {
{RTE_LOGTYPE_PMD, "pmd"},
{RTE_LOGTYPE_HASH, "lib.hash"},
{RTE_LOGTYPE_LPM, "lib.lpm"},
- {RTE_LOGTYPE_KNI, "lib.kni"},
{RTE_LOGTYPE_ACL, "lib.acl"},
{RTE_LOGTYPE_POWER, "lib.power"},
{RTE_LOGTYPE_METER, "lib.meter"},
diff --git a/lib/eal/include/rte_log.h b/lib/eal/include/rte_log.h
index 6d2b0856a565..bdefff2a5933 100644
--- a/lib/eal/include/rte_log.h
+++ b/lib/eal/include/rte_log.h
@@ -34,7 +34,7 @@ extern "C" {
#define RTE_LOGTYPE_PMD 5 /**< Log related to poll mode driver. */
#define RTE_LOGTYPE_HASH 6 /**< Log related to hash table. */
#define RTE_LOGTYPE_LPM 7 /**< Log related to LPM. */
-#define RTE_LOGTYPE_KNI 8 /**< Log related to KNI. */
+ /* was RTE_LOGTYPE_KNI */
#define RTE_LOGTYPE_ACL 9 /**< Log related to ACL. */
#define RTE_LOGTYPE_POWER 10 /**< Log related to power. */
#define RTE_LOGTYPE_METER 11 /**< Log related to QoS meter. */
diff --git a/lib/eal/linux/eal.c b/lib/eal/linux/eal.c
index c6efd920145c..a1fefcd9d83a 100644
--- a/lib/eal/linux/eal.c
+++ b/lib/eal/linux/eal.c
@@ -1084,11 +1084,6 @@ rte_eal_init(int argc, char **argv)
*/
iova_mode = RTE_IOVA_VA;
RTE_LOG(DEBUG, EAL, "Physical addresses are unavailable, selecting IOVA as VA mode.\n");
-#if defined(RTE_LIB_KNI) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
- } else if (rte_eal_check_module("rte_kni") == 1) {
- iova_mode = RTE_IOVA_PA;
- RTE_LOG(DEBUG, EAL, "KNI is loaded, selecting IOVA as PA mode for better KNI performance.\n");
-#endif
} else if (is_iommu_enabled()) {
/* we have an IOMMU, pick IOVA as VA mode */
iova_mode = RTE_IOVA_VA;
@@ -1101,20 +1096,6 @@ rte_eal_init(int argc, char **argv)
RTE_LOG(DEBUG, EAL, "IOMMU is not available, selecting IOVA as PA mode.\n");
}
}
-#if defined(RTE_LIB_KNI) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
- /* Workaround for KNI which requires physical address to work
- * in kernels < 4.10
- */
- if (iova_mode == RTE_IOVA_VA &&
- rte_eal_check_module("rte_kni") == 1) {
- if (phys_addrs) {
- iova_mode = RTE_IOVA_PA;
- RTE_LOG(WARNING, EAL, "Forcing IOVA as 'PA' because KNI module is loaded\n");
- } else {
- RTE_LOG(DEBUG, EAL, "KNI can not work since physical addresses are unavailable\n");
- }
- }
-#endif
rte_eal_get_configuration()->iova_mode = iova_mode;
} else {
rte_eal_get_configuration()->iova_mode =
diff --git a/lib/kni/meson.build b/lib/kni/meson.build
deleted file mode 100644
index 5ce410f7f2d2..000000000000
--- a/lib/kni/meson.build
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
-
-if is_windows
- build = false
- reason = 'not supported on Windows'
- subdir_done()
-endif
-
-if dpdk_conf.get('RTE_IOVA_IN_MBUF') == 0
- build = false
- reason = 'requires IOVA in mbuf (set enable_iova_as_pa option)'
-endif
-
-if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
- build = false
- reason = 'only supported on 64-bit Linux'
-endif
-sources = files('rte_kni.c')
-headers = files('rte_kni.h', 'rte_kni_common.h')
-deps += ['ethdev', 'pci']
diff --git a/lib/kni/rte_kni.c b/lib/kni/rte_kni.c
deleted file mode 100644
index bfa6a001ff59..000000000000
--- a/lib/kni/rte_kni.c
+++ /dev/null
@@ -1,843 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#ifndef RTE_EXEC_ENV_LINUX
-#error "KNI is not supported"
-#endif
-
-#include <string.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/ioctl.h>
-#include <linux/version.h>
-
-#include <rte_string_fns.h>
-#include <rte_ethdev.h>
-#include <rte_malloc.h>
-#include <rte_log.h>
-#include <rte_kni.h>
-#include <rte_memzone.h>
-#include <rte_tailq.h>
-#include <rte_eal_memconfig.h>
-#include <rte_kni_common.h>
-#include "rte_kni_fifo.h"
-
-#define MAX_MBUF_BURST_NUM 32
-
-/* Maximum number of ring entries */
-#define KNI_FIFO_COUNT_MAX 1024
-#define KNI_FIFO_SIZE (KNI_FIFO_COUNT_MAX * sizeof(void *) + \
- sizeof(struct rte_kni_fifo))
-
-#define KNI_REQUEST_MBUF_NUM_MAX 32
-
-#define KNI_MEM_CHECK(cond, fail) do { if (cond) goto fail; } while (0)
-
-#define KNI_MZ_NAME_FMT "kni_info_%s"
-#define KNI_TX_Q_MZ_NAME_FMT "kni_tx_%s"
-#define KNI_RX_Q_MZ_NAME_FMT "kni_rx_%s"
-#define KNI_ALLOC_Q_MZ_NAME_FMT "kni_alloc_%s"
-#define KNI_FREE_Q_MZ_NAME_FMT "kni_free_%s"
-#define KNI_REQ_Q_MZ_NAME_FMT "kni_req_%s"
-#define KNI_RESP_Q_MZ_NAME_FMT "kni_resp_%s"
-#define KNI_SYNC_ADDR_MZ_NAME_FMT "kni_sync_%s"
-
-TAILQ_HEAD(rte_kni_list, rte_tailq_entry);
-
-static struct rte_tailq_elem rte_kni_tailq = {
- .name = "RTE_KNI",
-};
-EAL_REGISTER_TAILQ(rte_kni_tailq)
-
-/**
- * KNI context
- */
-struct rte_kni {
- char name[RTE_KNI_NAMESIZE]; /**< KNI interface name */
- uint16_t group_id; /**< Group ID of KNI devices */
- uint32_t slot_id; /**< KNI pool slot ID */
- struct rte_mempool *pktmbuf_pool; /**< pkt mbuf mempool */
- unsigned int mbuf_size; /**< mbuf size */
-
- const struct rte_memzone *m_tx_q; /**< TX queue memzone */
- const struct rte_memzone *m_rx_q; /**< RX queue memzone */
- const struct rte_memzone *m_alloc_q;/**< Alloc queue memzone */
- const struct rte_memzone *m_free_q; /**< Free queue memzone */
-
- struct rte_kni_fifo *tx_q; /**< TX queue */
- struct rte_kni_fifo *rx_q; /**< RX queue */
- struct rte_kni_fifo *alloc_q; /**< Allocated mbufs queue */
- struct rte_kni_fifo *free_q; /**< To be freed mbufs queue */
-
- const struct rte_memzone *m_req_q; /**< Request queue memzone */
- const struct rte_memzone *m_resp_q; /**< Response queue memzone */
- const struct rte_memzone *m_sync_addr;/**< Sync addr memzone */
-
- /* For request & response */
- struct rte_kni_fifo *req_q; /**< Request queue */
- struct rte_kni_fifo *resp_q; /**< Response queue */
- void *sync_addr; /**< Req/Resp Mem address */
-
- struct rte_kni_ops ops; /**< operations for request */
-};
-
-enum kni_ops_status {
- KNI_REQ_NO_REGISTER = 0,
- KNI_REQ_REGISTERED,
-};
-
-static void kni_free_mbufs(struct rte_kni *kni);
-static void kni_allocate_mbufs(struct rte_kni *kni);
-
-static volatile int kni_fd = -1;
-
-/* Shall be called before any allocation happens */
-int
-rte_kni_init(unsigned int max_kni_ifaces __rte_unused)
-{
- RTE_LOG(WARNING, KNI, "WARNING: KNI is deprecated and will be removed in DPDK 23.11\n");
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
- if (rte_eal_iova_mode() != RTE_IOVA_PA) {
- RTE_LOG(ERR, KNI, "KNI requires IOVA as PA\n");
- return -1;
- }
-#endif
-
- /* Check FD and open */
- if (kni_fd < 0) {
- kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
- if (kni_fd < 0) {
- RTE_LOG(ERR, KNI,
- "Can not open /dev/%s\n", KNI_DEVICE);
- return -1;
- }
- }
-
- return 0;
-}
-
-static struct rte_kni *
-__rte_kni_get(const char *name)
-{
- struct rte_kni *kni;
- struct rte_tailq_entry *te;
- struct rte_kni_list *kni_list;
-
- kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
-
- TAILQ_FOREACH(te, kni_list, next) {
- kni = te->data;
- if (strncmp(name, kni->name, RTE_KNI_NAMESIZE) == 0)
- break;
- }
-
- if (te == NULL)
- kni = NULL;
-
- return kni;
-}
-
-static int
-kni_reserve_mz(struct rte_kni *kni)
-{
- char mz_name[RTE_MEMZONE_NAMESIZE];
-
- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_TX_Q_MZ_NAME_FMT, kni->name);
- kni->m_tx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
- RTE_MEMZONE_IOVA_CONTIG);
- KNI_MEM_CHECK(kni->m_tx_q == NULL, tx_q_fail);
-
- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RX_Q_MZ_NAME_FMT, kni->name);
- kni->m_rx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
- RTE_MEMZONE_IOVA_CONTIG);
- KNI_MEM_CHECK(kni->m_rx_q == NULL, rx_q_fail);
-
- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_ALLOC_Q_MZ_NAME_FMT, kni->name);
- kni->m_alloc_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
- RTE_MEMZONE_IOVA_CONTIG);
- KNI_MEM_CHECK(kni->m_alloc_q == NULL, alloc_q_fail);
-
- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_FREE_Q_MZ_NAME_FMT, kni->name);
- kni->m_free_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
- RTE_MEMZONE_IOVA_CONTIG);
- KNI_MEM_CHECK(kni->m_free_q == NULL, free_q_fail);
-
- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_REQ_Q_MZ_NAME_FMT, kni->name);
- kni->m_req_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
- RTE_MEMZONE_IOVA_CONTIG);
- KNI_MEM_CHECK(kni->m_req_q == NULL, req_q_fail);
-
- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RESP_Q_MZ_NAME_FMT, kni->name);
- kni->m_resp_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
- RTE_MEMZONE_IOVA_CONTIG);
- KNI_MEM_CHECK(kni->m_resp_q == NULL, resp_q_fail);
-
- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_SYNC_ADDR_MZ_NAME_FMT, kni->name);
- kni->m_sync_addr = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
- RTE_MEMZONE_IOVA_CONTIG);
- KNI_MEM_CHECK(kni->m_sync_addr == NULL, sync_addr_fail);
-
- return 0;
-
-sync_addr_fail:
- rte_memzone_free(kni->m_resp_q);
-resp_q_fail:
- rte_memzone_free(kni->m_req_q);
-req_q_fail:
- rte_memzone_free(kni->m_free_q);
-free_q_fail:
- rte_memzone_free(kni->m_alloc_q);
-alloc_q_fail:
- rte_memzone_free(kni->m_rx_q);
-rx_q_fail:
- rte_memzone_free(kni->m_tx_q);
-tx_q_fail:
- return -1;
-}
-
-static void
-kni_release_mz(struct rte_kni *kni)
-{
- rte_memzone_free(kni->m_tx_q);
- rte_memzone_free(kni->m_rx_q);
- rte_memzone_free(kni->m_alloc_q);
- rte_memzone_free(kni->m_free_q);
- rte_memzone_free(kni->m_req_q);
- rte_memzone_free(kni->m_resp_q);
- rte_memzone_free(kni->m_sync_addr);
-}
-
-struct rte_kni *
-rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
- const struct rte_kni_conf *conf,
- struct rte_kni_ops *ops)
-{
- int ret;
- struct rte_kni_device_info dev_info;
- struct rte_kni *kni;
- struct rte_tailq_entry *te;
- struct rte_kni_list *kni_list;
-
- if (!pktmbuf_pool || !conf || !conf->name[0])
- return NULL;
-
- /* Check if KNI subsystem has been initialized */
- if (kni_fd < 0) {
- RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
- return NULL;
- }
-
- rte_mcfg_tailq_write_lock();
-
- kni = __rte_kni_get(conf->name);
- if (kni != NULL) {
- RTE_LOG(ERR, KNI, "KNI already exists\n");
- goto unlock;
- }
-
- te = rte_zmalloc("KNI_TAILQ_ENTRY", sizeof(*te), 0);
- if (te == NULL) {
- RTE_LOG(ERR, KNI, "Failed to allocate tailq entry\n");
- goto unlock;
- }
-
- kni = rte_zmalloc("KNI", sizeof(struct rte_kni), RTE_CACHE_LINE_SIZE);
- if (kni == NULL) {
- RTE_LOG(ERR, KNI, "KNI memory allocation failed\n");
- goto kni_fail;
- }
-
- strlcpy(kni->name, conf->name, RTE_KNI_NAMESIZE);
-
- if (ops)
- memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
- else
- kni->ops.port_id = UINT16_MAX;
-
- memset(&dev_info, 0, sizeof(dev_info));
- dev_info.core_id = conf->core_id;
- dev_info.force_bind = conf->force_bind;
- dev_info.group_id = conf->group_id;
- dev_info.mbuf_size = conf->mbuf_size;
- dev_info.mtu = conf->mtu;
- dev_info.min_mtu = conf->min_mtu;
- dev_info.max_mtu = conf->max_mtu;
-
- memcpy(dev_info.mac_addr, conf->mac_addr, RTE_ETHER_ADDR_LEN);
-
- strlcpy(dev_info.name, conf->name, RTE_KNI_NAMESIZE);
-
- ret = kni_reserve_mz(kni);
- if (ret < 0)
- goto mz_fail;
-
- /* TX RING */
- kni->tx_q = kni->m_tx_q->addr;
- kni_fifo_init(kni->tx_q, KNI_FIFO_COUNT_MAX);
- dev_info.tx_phys = kni->m_tx_q->iova;
-
- /* RX RING */
- kni->rx_q = kni->m_rx_q->addr;
- kni_fifo_init(kni->rx_q, KNI_FIFO_COUNT_MAX);
- dev_info.rx_phys = kni->m_rx_q->iova;
-
- /* ALLOC RING */
- kni->alloc_q = kni->m_alloc_q->addr;
- kni_fifo_init(kni->alloc_q, KNI_FIFO_COUNT_MAX);
- dev_info.alloc_phys = kni->m_alloc_q->iova;
-
- /* FREE RING */
- kni->free_q = kni->m_free_q->addr;
- kni_fifo_init(kni->free_q, KNI_FIFO_COUNT_MAX);
- dev_info.free_phys = kni->m_free_q->iova;
-
- /* Request RING */
- kni->req_q = kni->m_req_q->addr;
- kni_fifo_init(kni->req_q, KNI_FIFO_COUNT_MAX);
- dev_info.req_phys = kni->m_req_q->iova;
-
- /* Response RING */
- kni->resp_q = kni->m_resp_q->addr;
- kni_fifo_init(kni->resp_q, KNI_FIFO_COUNT_MAX);
- dev_info.resp_phys = kni->m_resp_q->iova;
-
- /* Req/Resp sync mem area */
- kni->sync_addr = kni->m_sync_addr->addr;
- dev_info.sync_va = kni->m_sync_addr->addr;
- dev_info.sync_phys = kni->m_sync_addr->iova;
-
- kni->pktmbuf_pool = pktmbuf_pool;
- kni->group_id = conf->group_id;
- kni->mbuf_size = conf->mbuf_size;
-
- dev_info.iova_mode = (rte_eal_iova_mode() == RTE_IOVA_VA) ? 1 : 0;
-
- ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
- if (ret < 0)
- goto ioctl_fail;
-
- te->data = kni;
-
- kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
- TAILQ_INSERT_TAIL(kni_list, te, next);
-
- rte_mcfg_tailq_write_unlock();
-
- /* Allocate mbufs and then put them into alloc_q */
- kni_allocate_mbufs(kni);
-
- return kni;
-
-ioctl_fail:
- kni_release_mz(kni);
-mz_fail:
- rte_free(kni);
-kni_fail:
- rte_free(te);
-unlock:
- rte_mcfg_tailq_write_unlock();
-
- return NULL;
-}
-
-static void
-kni_free_fifo(struct rte_kni_fifo *fifo)
-{
- int ret;
- struct rte_mbuf *pkt;
-
- do {
- ret = kni_fifo_get(fifo, (void **)&pkt, 1);
- if (ret)
- rte_pktmbuf_free(pkt);
- } while (ret);
-}
-
-static void *
-va2pa(struct rte_mbuf *m)
-{
- return (void *)((unsigned long)m -
- ((unsigned long)m->buf_addr - (unsigned long)rte_mbuf_iova_get(m)));
-}
-
-static void *
-va2pa_all(struct rte_mbuf *mbuf)
-{
- void *phy_mbuf = va2pa(mbuf);
- struct rte_mbuf *next = mbuf->next;
- while (next) {
- mbuf->next = va2pa(next);
- mbuf = next;
- next = mbuf->next;
- }
- return phy_mbuf;
-}
-
-static void
-obj_free(struct rte_mempool *mp __rte_unused, void *opaque, void *obj,
- unsigned obj_idx __rte_unused)
-{
- struct rte_mbuf *m = obj;
- void *mbuf_phys = opaque;
-
- if (va2pa(m) == mbuf_phys)
- rte_pktmbuf_free(m);
-}
-
-static void
-kni_free_fifo_phy(struct rte_mempool *mp, struct rte_kni_fifo *fifo)
-{
- void *mbuf_phys;
- int ret;
-
- do {
- ret = kni_fifo_get(fifo, &mbuf_phys, 1);
- if (ret)
- rte_mempool_obj_iter(mp, obj_free, mbuf_phys);
- } while (ret);
-}
-
-int
-rte_kni_release(struct rte_kni *kni)
-{
- struct rte_tailq_entry *te;
- struct rte_kni_list *kni_list;
- struct rte_kni_device_info dev_info;
- uint32_t retry = 5;
-
- if (!kni)
- return -1;
-
- kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
-
- rte_mcfg_tailq_write_lock();
-
- TAILQ_FOREACH(te, kni_list, next) {
- if (te->data == kni)
- break;
- }
-
- if (te == NULL)
- goto unlock;
-
- strlcpy(dev_info.name, kni->name, sizeof(dev_info.name));
- if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) {
- RTE_LOG(ERR, KNI, "Fail to release kni device\n");
- goto unlock;
- }
-
- TAILQ_REMOVE(kni_list, te, next);
-
- rte_mcfg_tailq_write_unlock();
-
- /* mbufs in all fifo should be released, except request/response */
-
- /* wait until all rxq packets processed by kernel */
- while (kni_fifo_count(kni->rx_q) && retry--)
- usleep(1000);
-
- if (kni_fifo_count(kni->rx_q))
- RTE_LOG(ERR, KNI, "Fail to free all Rx-q items\n");
-
- kni_free_fifo_phy(kni->pktmbuf_pool, kni->alloc_q);
- kni_free_fifo(kni->tx_q);
- kni_free_fifo(kni->free_q);
-
- kni_release_mz(kni);
-
- rte_free(kni);
-
- rte_free(te);
-
- return 0;
-
-unlock:
- rte_mcfg_tailq_write_unlock();
-
- return -1;
-}
-
-/* default callback for request of configuring device mac address */
-static int
-kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
-{
- int ret = 0;
-
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
- return -EINVAL;
- }
-
- RTE_LOG(INFO, KNI, "Configure mac address of %d", port_id);
-
- ret = rte_eth_dev_default_mac_addr_set(port_id,
- (struct rte_ether_addr *)mac_addr);
- if (ret < 0)
- RTE_LOG(ERR, KNI, "Failed to config mac_addr for port %d\n",
- port_id);
-
- return ret;
-}
-
-/* default callback for request of configuring promiscuous mode */
-static int
-kni_config_promiscusity(uint16_t port_id, uint8_t to_on)
-{
- int ret;
-
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
- return -EINVAL;
- }
-
- RTE_LOG(INFO, KNI, "Configure promiscuous mode of %d to %d\n",
- port_id, to_on);
-
- if (to_on)
- ret = rte_eth_promiscuous_enable(port_id);
- else
- ret = rte_eth_promiscuous_disable(port_id);
-
- if (ret != 0)
- RTE_LOG(ERR, KNI,
- "Failed to %s promiscuous mode for port %u: %s\n",
- to_on ? "enable" : "disable", port_id,
- rte_strerror(-ret));
-
- return ret;
-}
-
-/* default callback for request of configuring allmulticast mode */
-static int
-kni_config_allmulticast(uint16_t port_id, uint8_t to_on)
-{
- int ret;
-
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
- return -EINVAL;
- }
-
- RTE_LOG(INFO, KNI, "Configure allmulticast mode of %d to %d\n",
- port_id, to_on);
-
- if (to_on)
- ret = rte_eth_allmulticast_enable(port_id);
- else
- ret = rte_eth_allmulticast_disable(port_id);
- if (ret != 0)
- RTE_LOG(ERR, KNI,
- "Failed to %s allmulticast mode for port %u: %s\n",
- to_on ? "enable" : "disable", port_id,
- rte_strerror(-ret));
-
- return ret;
-}
-
-int
-rte_kni_handle_request(struct rte_kni *kni)
-{
- unsigned int ret;
- struct rte_kni_request *req = NULL;
-
- if (kni == NULL)
- return -1;
-
- /* Get request mbuf */
- ret = kni_fifo_get(kni->req_q, (void **)&req, 1);
- if (ret != 1)
- return 0; /* It is OK of can not getting the request mbuf */
-
- if (req != kni->sync_addr) {
- RTE_LOG(ERR, KNI, "Wrong req pointer %p\n", req);
- return -1;
- }
-
- /* Analyze the request and call the relevant actions for it */
- switch (req->req_id) {
- case RTE_KNI_REQ_CHANGE_MTU: /* Change MTU */
- if (kni->ops.change_mtu)
- req->result = kni->ops.change_mtu(kni->ops.port_id,
- req->new_mtu);
- break;
- case RTE_KNI_REQ_CFG_NETWORK_IF: /* Set network interface up/down */
- if (kni->ops.config_network_if)
- req->result = kni->ops.config_network_if(kni->ops.port_id,
- req->if_up);
- break;
- case RTE_KNI_REQ_CHANGE_MAC_ADDR: /* Change MAC Address */
- if (kni->ops.config_mac_address)
- req->result = kni->ops.config_mac_address(
- kni->ops.port_id, req->mac_addr);
- else if (kni->ops.port_id != UINT16_MAX)
- req->result = kni_config_mac_address(
- kni->ops.port_id, req->mac_addr);
- break;
- case RTE_KNI_REQ_CHANGE_PROMISC: /* Change PROMISCUOUS MODE */
- if (kni->ops.config_promiscusity)
- req->result = kni->ops.config_promiscusity(
- kni->ops.port_id, req->promiscusity);
- else if (kni->ops.port_id != UINT16_MAX)
- req->result = kni_config_promiscusity(
- kni->ops.port_id, req->promiscusity);
- break;
- case RTE_KNI_REQ_CHANGE_ALLMULTI: /* Change ALLMULTICAST MODE */
- if (kni->ops.config_allmulticast)
- req->result = kni->ops.config_allmulticast(
- kni->ops.port_id, req->allmulti);
- else if (kni->ops.port_id != UINT16_MAX)
- req->result = kni_config_allmulticast(
- kni->ops.port_id, req->allmulti);
- break;
- default:
- RTE_LOG(ERR, KNI, "Unknown request id %u\n", req->req_id);
- req->result = -EINVAL;
- break;
- }
-
- /* if needed, construct response buffer and put it back to resp_q */
- if (!req->async)
- ret = kni_fifo_put(kni->resp_q, (void **)&req, 1);
- else
- ret = 1;
- if (ret != 1) {
- RTE_LOG(ERR, KNI, "Fail to put the muf back to resp_q\n");
- return -1; /* It is an error of can't putting the mbuf back */
- }
-
- return 0;
-}
-
-unsigned
-rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)
-{
- num = RTE_MIN(kni_fifo_free_count(kni->rx_q), num);
- void *phy_mbufs[num];
- unsigned int ret;
- unsigned int i;
-
- for (i = 0; i < num; i++)
- phy_mbufs[i] = va2pa_all(mbufs[i]);
-
- ret = kni_fifo_put(kni->rx_q, phy_mbufs, num);
-
- /* Get mbufs from free_q and then free them */
- kni_free_mbufs(kni);
-
- return ret;
-}
-
-unsigned
-rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)
-{
- unsigned int ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num);
-
- /* If buffers removed or alloc_q is empty, allocate mbufs and then put them into alloc_q */
- if (ret || (kni_fifo_count(kni->alloc_q) == 0))
- kni_allocate_mbufs(kni);
-
- return ret;
-}
-
-static void
-kni_free_mbufs(struct rte_kni *kni)
-{
- int i, ret;
- struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
-
- ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
- if (likely(ret > 0)) {
- for (i = 0; i < ret; i++)
- rte_pktmbuf_free(pkts[i]);
- }
-}
-
-static void
-kni_allocate_mbufs(struct rte_kni *kni)
-{
- int i, ret;
- struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
- void *phys[MAX_MBUF_BURST_NUM];
- int allocq_free;
-
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
- offsetof(struct rte_kni_mbuf, pool));
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_addr) !=
- offsetof(struct rte_kni_mbuf, buf_addr));
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, next) !=
- offsetof(struct rte_kni_mbuf, next));
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
- offsetof(struct rte_kni_mbuf, data_off));
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
- offsetof(struct rte_kni_mbuf, data_len));
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
- offsetof(struct rte_kni_mbuf, pkt_len));
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
- offsetof(struct rte_kni_mbuf, ol_flags));
-
- /* Check if pktmbuf pool has been configured */
- if (kni->pktmbuf_pool == NULL) {
- RTE_LOG(ERR, KNI, "No valid mempool for allocating mbufs\n");
- return;
- }
-
- allocq_free = kni_fifo_free_count(kni->alloc_q);
- allocq_free = (allocq_free > MAX_MBUF_BURST_NUM) ?
- MAX_MBUF_BURST_NUM : allocq_free;
- for (i = 0; i < allocq_free; i++) {
- pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
- if (unlikely(pkts[i] == NULL)) {
- /* Out of memory */
- RTE_LOG(ERR, KNI, "Out of memory\n");
- break;
- }
- phys[i] = va2pa(pkts[i]);
- }
-
- /* No pkt mbuf allocated */
- if (i <= 0)
- return;
-
- ret = kni_fifo_put(kni->alloc_q, phys, i);
-
- /* Check if any mbufs not put into alloc_q, and then free them */
- if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
- int j;
-
- for (j = ret; j < i; j++)
- rte_pktmbuf_free(pkts[j]);
- }
-}
-
-struct rte_kni *
-rte_kni_get(const char *name)
-{
- struct rte_kni *kni;
-
- if (name == NULL || name[0] == '\0')
- return NULL;
-
- rte_mcfg_tailq_read_lock();
-
- kni = __rte_kni_get(name);
-
- rte_mcfg_tailq_read_unlock();
-
- return kni;
-}
-
-const char *
-rte_kni_get_name(const struct rte_kni *kni)
-{
- return kni->name;
-}
-
-static enum kni_ops_status
-kni_check_request_register(struct rte_kni_ops *ops)
-{
- /* check if KNI request ops has been registered*/
- if (ops == NULL)
- return KNI_REQ_NO_REGISTER;
-
- if (ops->change_mtu == NULL
- && ops->config_network_if == NULL
- && ops->config_mac_address == NULL
- && ops->config_promiscusity == NULL
- && ops->config_allmulticast == NULL)
- return KNI_REQ_NO_REGISTER;
-
- return KNI_REQ_REGISTERED;
-}
-
-int
-rte_kni_register_handlers(struct rte_kni *kni, struct rte_kni_ops *ops)
-{
- enum kni_ops_status req_status;
-
- if (ops == NULL) {
- RTE_LOG(ERR, KNI, "Invalid KNI request operation.\n");
- return -1;
- }
-
- if (kni == NULL) {
- RTE_LOG(ERR, KNI, "Invalid kni info.\n");
- return -1;
- }
-
- req_status = kni_check_request_register(&kni->ops);
- if (req_status == KNI_REQ_REGISTERED) {
- RTE_LOG(ERR, KNI, "The KNI request operation has already registered.\n");
- return -1;
- }
-
- memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
- return 0;
-}
-
-int
-rte_kni_unregister_handlers(struct rte_kni *kni)
-{
- if (kni == NULL) {
- RTE_LOG(ERR, KNI, "Invalid kni info.\n");
- return -1;
- }
-
- memset(&kni->ops, 0, sizeof(struct rte_kni_ops));
-
- return 0;
-}
-
-int
-rte_kni_update_link(struct rte_kni *kni, unsigned int linkup)
-{
- char path[64];
- char old_carrier[2];
- const char *new_carrier;
- int old_linkup;
- int fd, ret;
-
- if (kni == NULL)
- return -1;
-
- snprintf(path, sizeof(path), "/sys/devices/virtual/net/%s/carrier",
- kni->name);
-
- fd = open(path, O_RDWR);
- if (fd == -1) {
- RTE_LOG(ERR, KNI, "Failed to open file: %s.\n", path);
- return -1;
- }
-
- ret = read(fd, old_carrier, 2);
- if (ret < 1) {
- close(fd);
- return -1;
- }
- old_linkup = (old_carrier[0] == '1');
-
- if (old_linkup == (int)linkup)
- goto out;
-
- new_carrier = linkup ? "1" : "0";
- ret = write(fd, new_carrier, 1);
- if (ret < 1) {
- RTE_LOG(ERR, KNI, "Failed to write file: %s.\n", path);
- close(fd);
- return -1;
- }
-out:
- close(fd);
- return old_linkup;
-}
-
-void
-rte_kni_close(void)
-{
- if (kni_fd < 0)
- return;
-
- close(kni_fd);
- kni_fd = -1;
-}
diff --git a/lib/kni/rte_kni.h b/lib/kni/rte_kni.h
deleted file mode 100644
index 1e508acc829b..000000000000
--- a/lib/kni/rte_kni.h
+++ /dev/null
@@ -1,269 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#ifndef _RTE_KNI_H_
-#define _RTE_KNI_H_
-
-/**
- * @file
- * RTE KNI
- *
- * The KNI library provides the ability to create and destroy kernel NIC
- * interfaces that may be used by the RTE application to receive/transmit
- * packets from/to Linux kernel net interfaces.
- *
- * This library provides two APIs to burst receive packets from KNI interfaces,
- * and burst transmit packets to KNI interfaces.
- */
-
-#include <rte_compat.h>
-#include <rte_pci.h>
-#include <rte_ether.h>
-
-#include <rte_kni_common.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct rte_kni;
-struct rte_mbuf;
-
-/**
- * Structure which has the function pointers for KNI interface.
- */
-struct rte_kni_ops {
- uint16_t port_id; /* Port ID */
-
- /* Pointer to function of changing MTU */
- int (*change_mtu)(uint16_t port_id, unsigned int new_mtu);
-
- /* Pointer to function of configuring network interface */
- int (*config_network_if)(uint16_t port_id, uint8_t if_up);
-
- /* Pointer to function of configuring mac address */
- int (*config_mac_address)(uint16_t port_id, uint8_t mac_addr[]);
-
- /* Pointer to function of configuring promiscuous mode */
- int (*config_promiscusity)(uint16_t port_id, uint8_t to_on);
-
- /* Pointer to function of configuring allmulticast mode */
- int (*config_allmulticast)(uint16_t port_id, uint8_t to_on);
-};
-
-/**
- * Structure for configuring KNI device.
- */
-struct rte_kni_conf {
- /*
- * KNI name which will be used in relevant network device.
- * Let the name as short as possible, as it will be part of
- * memzone name.
- */
- char name[RTE_KNI_NAMESIZE];
- uint32_t core_id; /* Core ID to bind kernel thread on */
- uint16_t group_id; /* Group ID */
- unsigned mbuf_size; /* mbuf size */
- struct rte_pci_addr addr; /* deprecated */
- struct rte_pci_id id; /* deprecated */
-
- __extension__
- uint8_t force_bind : 1; /* Flag to bind kernel thread */
- uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; /* MAC address assigned to KNI */
- uint16_t mtu;
- uint16_t min_mtu;
- uint16_t max_mtu;
-};
-
-/**
- * Initialize and preallocate KNI subsystem
- *
- * This function is to be executed on the main lcore only, after EAL
- * initialization and before any KNI interface is attempted to be
- * allocated
- *
- * @param max_kni_ifaces
- * The maximum number of KNI interfaces that can coexist concurrently
- *
- * @return
- * - 0 indicates success.
- * - negative value indicates failure.
- */
-int rte_kni_init(unsigned int max_kni_ifaces);
-
-
-/**
- * Allocate KNI interface according to the port id, mbuf size, mbuf pool,
- * configurations and callbacks for kernel requests.The KNI interface created
- * in the kernel space is the net interface the traditional Linux application
- * talking to.
- *
- * The rte_kni_alloc shall not be called before rte_kni_init() has been
- * called. rte_kni_alloc is thread safe.
- *
- * The mempool should have capacity of more than "2 x KNI_FIFO_COUNT_MAX"
- * elements for each KNI interface allocated.
- *
- * @param pktmbuf_pool
- * The mempool for allocating mbufs for packets.
- * @param conf
- * The pointer to the configurations of the KNI device.
- * @param ops
- * The pointer to the callbacks for the KNI kernel requests.
- *
- * @return
- * - The pointer to the context of a KNI interface.
- * - NULL indicate error.
- */
-struct rte_kni *rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
- const struct rte_kni_conf *conf, struct rte_kni_ops *ops);
-
-/**
- * Release KNI interface according to the context. It will also release the
- * paired KNI interface in kernel space. All processing on the specific KNI
- * context need to be stopped before calling this interface.
- *
- * rte_kni_release is thread safe.
- *
- * @param kni
- * The pointer to the context of an existent KNI interface.
- *
- * @return
- * - 0 indicates success.
- * - negative value indicates failure.
- */
-int rte_kni_release(struct rte_kni *kni);
-
-/**
- * It is used to handle the request mbufs sent from kernel space.
- * Then analyzes it and calls the specific actions for the specific requests.
- * Finally constructs the response mbuf and puts it back to the resp_q.
- *
- * @param kni
- * The pointer to the context of an existent KNI interface.
- *
- * @return
- * - 0
- * - negative value indicates failure.
- */
-int rte_kni_handle_request(struct rte_kni *kni);
-
-/**
- * Retrieve a burst of packets from a KNI interface. The retrieved packets are
- * stored in rte_mbuf structures whose pointers are supplied in the array of
- * mbufs, and the maximum number is indicated by num. It handles allocating
- * the mbufs for KNI interface alloc queue.
- *
- * @param kni
- * The KNI interface context.
- * @param mbufs
- * The array to store the pointers of mbufs.
- * @param num
- * The maximum number per burst.
- *
- * @return
- * The actual number of packets retrieved.
- */
-unsigned rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs,
- unsigned num);
-
-/**
- * Send a burst of packets to a KNI interface. The packets to be sent out are
- * stored in rte_mbuf structures whose pointers are supplied in the array of
- * mbufs, and the maximum number is indicated by num. It handles the freeing of
- * the mbufs in the free queue of KNI interface.
- *
- * @param kni
- * The KNI interface context.
- * @param mbufs
- * The array to store the pointers of mbufs.
- * @param num
- * The maximum number per burst.
- *
- * @return
- * The actual number of packets sent.
- */
-unsigned rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs,
- unsigned num);
-
-/**
- * Get the KNI context of its name.
- *
- * @param name
- * pointer to the KNI device name.
- *
- * @return
- * On success: Pointer to KNI interface.
- * On failure: NULL.
- */
-struct rte_kni *rte_kni_get(const char *name);
-
-/**
- * Get the name given to a KNI device
- *
- * @param kni
- * The KNI instance to query
- * @return
- * The pointer to the KNI name
- */
-const char *rte_kni_get_name(const struct rte_kni *kni);
-
-/**
- * Register KNI request handling for a specified port,and it can
- * be called by primary process or secondary process.
- *
- * @param kni
- * pointer to struct rte_kni.
- * @param ops
- * pointer to struct rte_kni_ops.
- *
- * @return
- * On success: 0
- * On failure: -1
- */
-int rte_kni_register_handlers(struct rte_kni *kni, struct rte_kni_ops *ops);
-
-/**
- * Unregister KNI request handling for a specified port.
- *
- * @param kni
- * pointer to struct rte_kni.
- *
- * @return
- * On success: 0
- * On failure: -1
- */
-int rte_kni_unregister_handlers(struct rte_kni *kni);
-
-/**
- * Update link carrier state for KNI port.
- *
- * Update the linkup/linkdown state of a KNI interface in the kernel.
- *
- * @param kni
- * pointer to struct rte_kni.
- * @param linkup
- * New link state:
- * 0 for linkdown.
- * > 0 for linkup.
- *
- * @return
- * On failure: -1
- * Previous link state == linkdown: 0
- * Previous link state == linkup: 1
- */
-__rte_experimental
-int
-rte_kni_update_link(struct rte_kni *kni, unsigned int linkup);
-
-/**
- * Close KNI device.
- */
-void rte_kni_close(void);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_KNI_H_ */
diff --git a/lib/kni/rte_kni_common.h b/lib/kni/rte_kni_common.h
deleted file mode 100644
index 8d3ee0fa4fc2..000000000000
--- a/lib/kni/rte_kni_common.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/* SPDX-License-Identifier: (BSD-3-Clause OR LGPL-2.1) */
-/*
- * Copyright(c) 2007-2014 Intel Corporation.
- */
-
-#ifndef _RTE_KNI_COMMON_H_
-#define _RTE_KNI_COMMON_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifdef __KERNEL__
-#include <linux/if.h>
-#include <asm/barrier.h>
-#define RTE_STD_C11
-#else
-#include <rte_common.h>
-#include <rte_config.h>
-#endif
-
-/*
- * KNI name is part of memzone name. Must not exceed IFNAMSIZ.
- */
-#define RTE_KNI_NAMESIZE 16
-
-#define RTE_CACHE_LINE_MIN_SIZE 64
-
-/*
- * Request id.
- */
-enum rte_kni_req_id {
- RTE_KNI_REQ_UNKNOWN = 0,
- RTE_KNI_REQ_CHANGE_MTU,
- RTE_KNI_REQ_CFG_NETWORK_IF,
- RTE_KNI_REQ_CHANGE_MAC_ADDR,
- RTE_KNI_REQ_CHANGE_PROMISC,
- RTE_KNI_REQ_CHANGE_ALLMULTI,
- RTE_KNI_REQ_MAX,
-};
-
-/*
- * Structure for KNI request.
- */
-struct rte_kni_request {
- uint32_t req_id; /**< Request id */
- RTE_STD_C11
- union {
- uint32_t new_mtu; /**< New MTU */
- uint8_t if_up; /**< 1: interface up, 0: interface down */
- uint8_t mac_addr[6]; /**< MAC address for interface */
- uint8_t promiscusity;/**< 1: promisc mode enable, 0: disable */
- uint8_t allmulti; /**< 1: all-multicast mode enable, 0: disable */
- };
- int32_t async : 1; /**< 1: request is asynchronous */
- int32_t result; /**< Result for processing request */
-} __attribute__((__packed__));
-
-/*
- * Fifo struct mapped in a shared memory. It describes a circular buffer FIFO
- * Write and read should wrap around. Fifo is empty when write == read
- * Writing should never overwrite the read position
- */
-struct rte_kni_fifo {
-#ifdef RTE_USE_C11_MEM_MODEL
- unsigned write; /**< Next position to be written*/
- unsigned read; /**< Next position to be read */
-#else
- volatile unsigned write; /**< Next position to be written*/
- volatile unsigned read; /**< Next position to be read */
-#endif
- unsigned len; /**< Circular buffer length */
- unsigned elem_size; /**< Pointer size - for 32/64 bit OS */
- void *volatile buffer[]; /**< The buffer contains mbuf pointers */
-};
-
-/*
- * The kernel image of the rte_mbuf struct, with only the relevant fields.
- * Padding is necessary to assure the offsets of these fields
- */
-struct rte_kni_mbuf {
- void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
- uint64_t buf_iova;
- uint16_t data_off; /**< Start address of data in segment buffer. */
- char pad1[2];
- uint16_t nb_segs; /**< Number of segments. */
- char pad4[2];
- uint64_t ol_flags; /**< Offload features. */
- char pad2[4];
- uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
- uint16_t data_len; /**< Amount of data in segment buffer. */
- char pad3[14];
- void *pool;
-
- /* fields on second cache line */
- __attribute__((__aligned__(RTE_CACHE_LINE_MIN_SIZE)))
- void *next; /**< Physical address of next mbuf in kernel. */
-};
-
-/*
- * Struct used to create a KNI device. Passed to the kernel in IOCTL call
- */
-
-struct rte_kni_device_info {
- char name[RTE_KNI_NAMESIZE]; /**< Network device name for KNI */
-
- phys_addr_t tx_phys;
- phys_addr_t rx_phys;
- phys_addr_t alloc_phys;
- phys_addr_t free_phys;
-
- /* Used by Ethtool */
- phys_addr_t req_phys;
- phys_addr_t resp_phys;
- phys_addr_t sync_phys;
- void * sync_va;
-
- /* mbuf mempool */
- void * mbuf_va;
- phys_addr_t mbuf_phys;
-
- uint16_t group_id; /**< Group ID */
- uint32_t core_id; /**< core ID to bind for kernel thread */
-
- __extension__
- uint8_t force_bind : 1; /**< Flag for kernel thread binding */
-
- /* mbuf size */
- unsigned mbuf_size;
- unsigned int mtu;
- unsigned int min_mtu;
- unsigned int max_mtu;
- uint8_t mac_addr[6];
- uint8_t iova_mode;
-};
-
-#define KNI_DEVICE "kni"
-
-#define RTE_KNI_IOCTL_TEST _IOWR(0, 1, int)
-#define RTE_KNI_IOCTL_CREATE _IOWR(0, 2, struct rte_kni_device_info)
-#define RTE_KNI_IOCTL_RELEASE _IOWR(0, 3, struct rte_kni_device_info)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_KNI_COMMON_H_ */
diff --git a/lib/kni/rte_kni_fifo.h b/lib/kni/rte_kni_fifo.h
deleted file mode 100644
index d2ec82fe87fc..000000000000
--- a/lib/kni/rte_kni_fifo.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-
-
-/**
- * @internal when c11 memory model enabled use c11 atomic memory barrier.
- * when under non c11 memory model use rte_smp_* memory barrier.
- *
- * @param src
- * Pointer to the source data.
- * @param dst
- * Pointer to the destination data.
- * @param value
- * Data value.
- */
-#ifdef RTE_USE_C11_MEM_MODEL
-#define __KNI_LOAD_ACQUIRE(src) ({ \
- __atomic_load_n((src), __ATOMIC_ACQUIRE); \
- })
-#define __KNI_STORE_RELEASE(dst, value) do { \
- __atomic_store_n((dst), value, __ATOMIC_RELEASE); \
- } while(0)
-#else
-#define __KNI_LOAD_ACQUIRE(src) ({ \
- typeof (*(src)) val = *(src); \
- rte_smp_rmb(); \
- val; \
- })
-#define __KNI_STORE_RELEASE(dst, value) do { \
- *(dst) = value; \
- rte_smp_wmb(); \
- } while(0)
-#endif
-
-/**
- * Initializes the kni fifo structure
- */
-static void
-kni_fifo_init(struct rte_kni_fifo *fifo, unsigned size)
-{
- /* Ensure size is power of 2 */
- if (size & (size - 1))
- rte_panic("KNI fifo size must be power of 2\n");
-
- fifo->write = 0;
- fifo->read = 0;
- fifo->len = size;
- fifo->elem_size = sizeof(void *);
-}
-
-/**
- * Adds num elements into the fifo. Return the number actually written
- */
-static inline unsigned
-kni_fifo_put(struct rte_kni_fifo *fifo, void **data, unsigned num)
-{
- unsigned i = 0;
- unsigned fifo_write = fifo->write;
- unsigned new_write = fifo_write;
- unsigned fifo_read = __KNI_LOAD_ACQUIRE(&fifo->read);
-
- for (i = 0; i < num; i++) {
- new_write = (new_write + 1) & (fifo->len - 1);
-
- if (new_write == fifo_read)
- break;
- fifo->buffer[fifo_write] = data[i];
- fifo_write = new_write;
- }
- __KNI_STORE_RELEASE(&fifo->write, fifo_write);
- return i;
-}
-
-/**
- * Get up to num elements from the fifo. Return the number actually read
- */
-static inline unsigned
-kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
-{
- unsigned i = 0;
- unsigned new_read = fifo->read;
- unsigned fifo_write = __KNI_LOAD_ACQUIRE(&fifo->write);
-
- for (i = 0; i < num; i++) {
- if (new_read == fifo_write)
- break;
-
- data[i] = fifo->buffer[new_read];
- new_read = (new_read + 1) & (fifo->len - 1);
- }
- __KNI_STORE_RELEASE(&fifo->read, new_read);
- return i;
-}
-
-/**
- * Get the num of elements in the fifo
- */
-static inline uint32_t
-kni_fifo_count(struct rte_kni_fifo *fifo)
-{
- unsigned fifo_write = __KNI_LOAD_ACQUIRE(&fifo->write);
- unsigned fifo_read = __KNI_LOAD_ACQUIRE(&fifo->read);
- return (fifo->len + fifo_write - fifo_read) & (fifo->len - 1);
-}
-
-/**
- * Get the num of available elements in the fifo
- */
-static inline uint32_t
-kni_fifo_free_count(struct rte_kni_fifo *fifo)
-{
- uint32_t fifo_write = __KNI_LOAD_ACQUIRE(&fifo->write);
- uint32_t fifo_read = __KNI_LOAD_ACQUIRE(&fifo->read);
- return (fifo_read - fifo_write - 1) & (fifo->len - 1);
-}
diff --git a/lib/kni/version.map b/lib/kni/version.map
deleted file mode 100644
index 13ffaa5bfd65..000000000000
--- a/lib/kni/version.map
+++ /dev/null
@@ -1,24 +0,0 @@
-DPDK_24 {
- global:
-
- rte_kni_alloc;
- rte_kni_close;
- rte_kni_get;
- rte_kni_get_name;
- rte_kni_handle_request;
- rte_kni_init;
- rte_kni_register_handlers;
- rte_kni_release;
- rte_kni_rx_burst;
- rte_kni_tx_burst;
- rte_kni_unregister_handlers;
-
- local: *;
-};
-
-EXPERIMENTAL {
- global:
-
- # updated in v21.08
- rte_kni_update_link;
-};
diff --git a/lib/meson.build b/lib/meson.build
index ecac701161c8..bbfa28ba86dd 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -39,7 +39,6 @@ libraries = [
'gso',
'ip_frag',
'jobstats',
- 'kni',
'latencystats',
'lpm',
'member',
@@ -73,7 +72,6 @@ optional_libs = [
'graph',
'gro',
'gso',
- 'kni',
'jobstats',
'latencystats',
'metrics',
@@ -86,10 +84,6 @@ optional_libs = [
'vhost',
]
-dpdk_libs_deprecated += [
- 'kni',
-]
-
disabled_libs = []
opt_disabled_libs = run_command(list_dir_globs, get_option('disable_libs'),
check: true).stdout().split()
diff --git a/lib/port/meson.build b/lib/port/meson.build
index 3ab37e2cb4b7..b0af2b185b39 100644
--- a/lib/port/meson.build
+++ b/lib/port/meson.build
@@ -45,9 +45,3 @@ if dpdk_conf.has('RTE_HAS_LIBPCAP')
dpdk_conf.set('RTE_PORT_PCAP', 1)
ext_deps += pcap_dep # dependency provided in config/meson.build
endif
-
-if dpdk_conf.has('RTE_LIB_KNI')
- sources += files('rte_port_kni.c')
- headers += files('rte_port_kni.h')
- deps += 'kni'
-endif
diff --git a/lib/port/rte_port_kni.c b/lib/port/rte_port_kni.c
deleted file mode 100644
index 1c7a6cb200ea..000000000000
--- a/lib/port/rte_port_kni.c
+++ /dev/null
@@ -1,515 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016 Ethan Zhuang <zhuangwj@gmail.com>.
- * Copyright(c) 2016 Intel Corporation.
- */
-#include <string.h>
-
-#include <rte_malloc.h>
-#include <rte_kni.h>
-
-#include "rte_port_kni.h"
-
-/*
- * Port KNI Reader
- */
-#ifdef RTE_PORT_STATS_COLLECT
-
-#define RTE_PORT_KNI_READER_STATS_PKTS_IN_ADD(port, val) \
- port->stats.n_pkts_in += val
-#define RTE_PORT_KNI_READER_STATS_PKTS_DROP_ADD(port, val) \
- port->stats.n_pkts_drop += val
-
-#else
-
-#define RTE_PORT_KNI_READER_STATS_PKTS_IN_ADD(port, val)
-#define RTE_PORT_KNI_READER_STATS_PKTS_DROP_ADD(port, val)
-
-#endif
-
-struct rte_port_kni_reader {
- struct rte_port_in_stats stats;
-
- struct rte_kni *kni;
-};
-
-static void *
-rte_port_kni_reader_create(void *params, int socket_id)
-{
- struct rte_port_kni_reader_params *conf =
- params;
- struct rte_port_kni_reader *port;
-
- /* Check input parameters */
- if (conf == NULL) {
- RTE_LOG(ERR, PORT, "%s: params is NULL\n", __func__);
- return NULL;
- }
-
- /* Memory allocation */
- port = rte_zmalloc_socket("PORT", sizeof(*port),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
- return NULL;
- }
-
- /* Initialization */
- port->kni = conf->kni;
-
- return port;
-}
-
-static int
-rte_port_kni_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
-{
- struct rte_port_kni_reader *p =
- port;
- uint16_t rx_pkt_cnt;
-
- rx_pkt_cnt = rte_kni_rx_burst(p->kni, pkts, n_pkts);
- RTE_PORT_KNI_READER_STATS_PKTS_IN_ADD(p, rx_pkt_cnt);
- return rx_pkt_cnt;
-}
-
-static int
-rte_port_kni_reader_free(void *port)
-{
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: port is NULL\n", __func__);
- return -EINVAL;
- }
-
- rte_free(port);
-
- return 0;
-}
-
-static int rte_port_kni_reader_stats_read(void *port,
- struct rte_port_in_stats *stats, int clear)
-{
- struct rte_port_kni_reader *p =
- port;
-
- if (stats != NULL)
- memcpy(stats, &p->stats, sizeof(p->stats));
-
- if (clear)
- memset(&p->stats, 0, sizeof(p->stats));
-
- return 0;
-}
-
-/*
- * Port KNI Writer
- */
-#ifdef RTE_PORT_STATS_COLLECT
-
-#define RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(port, val) \
- port->stats.n_pkts_in += val
-#define RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(port, val) \
- port->stats.n_pkts_drop += val
-
-#else
-
-#define RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(port, val)
-#define RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(port, val)
-
-#endif
-
-struct rte_port_kni_writer {
- struct rte_port_out_stats stats;
-
- struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
- uint32_t tx_burst_sz;
- uint32_t tx_buf_count;
- uint64_t bsz_mask;
- struct rte_kni *kni;
-};
-
-static void *
-rte_port_kni_writer_create(void *params, int socket_id)
-{
- struct rte_port_kni_writer_params *conf =
- params;
- struct rte_port_kni_writer *port;
-
- /* Check input parameters */
- if ((conf == NULL) ||
- (conf->tx_burst_sz == 0) ||
- (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
- (!rte_is_power_of_2(conf->tx_burst_sz))) {
- RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
- return NULL;
- }
-
- /* Memory allocation */
- port = rte_zmalloc_socket("PORT", sizeof(*port),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
- return NULL;
- }
-
- /* Initialization */
- port->kni = conf->kni;
- port->tx_burst_sz = conf->tx_burst_sz;
- port->tx_buf_count = 0;
- port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
-
- return port;
-}
-
-static inline void
-send_burst(struct rte_port_kni_writer *p)
-{
- uint32_t nb_tx;
-
- nb_tx = rte_kni_tx_burst(p->kni, p->tx_buf, p->tx_buf_count);
-
- RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
- for (; nb_tx < p->tx_buf_count; nb_tx++)
- rte_pktmbuf_free(p->tx_buf[nb_tx]);
-
- p->tx_buf_count = 0;
-}
-
-static int
-rte_port_kni_writer_tx(void *port, struct rte_mbuf *pkt)
-{
- struct rte_port_kni_writer *p =
- port;
-
- p->tx_buf[p->tx_buf_count++] = pkt;
- RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, 1);
- if (p->tx_buf_count >= p->tx_burst_sz)
- send_burst(p);
-
- return 0;
-}
-
-static int
-rte_port_kni_writer_tx_bulk(void *port,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask)
-{
- struct rte_port_kni_writer *p =
- port;
- uint64_t bsz_mask = p->bsz_mask;
- uint32_t tx_buf_count = p->tx_buf_count;
- uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
- ((pkts_mask & bsz_mask) ^ bsz_mask);
-
- if (expr == 0) {
- uint64_t n_pkts = __builtin_popcountll(pkts_mask);
- uint32_t n_pkts_ok;
-
- if (tx_buf_count)
- send_burst(p);
-
- RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
- n_pkts_ok = rte_kni_tx_burst(p->kni, pkts, n_pkts);
-
- RTE_PORT_KNI_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
- for (; n_pkts_ok < n_pkts; n_pkts_ok++) {
- struct rte_mbuf *pkt = pkts[n_pkts_ok];
-
- rte_pktmbuf_free(pkt);
- }
- } else {
- for (; pkts_mask;) {
- uint32_t pkt_index = __builtin_ctzll(pkts_mask);
- uint64_t pkt_mask = 1LLU << pkt_index;
- struct rte_mbuf *pkt = pkts[pkt_index];
-
- p->tx_buf[tx_buf_count++] = pkt;
- RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, 1);
- pkts_mask &= ~pkt_mask;
- }
-
- p->tx_buf_count = tx_buf_count;
- if (tx_buf_count >= p->tx_burst_sz)
- send_burst(p);
- }
-
- return 0;
-}
-
-static int
-rte_port_kni_writer_flush(void *port)
-{
- struct rte_port_kni_writer *p =
- port;
-
- if (p->tx_buf_count > 0)
- send_burst(p);
-
- return 0;
-}
-
-static int
-rte_port_kni_writer_free(void *port)
-{
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
- return -EINVAL;
- }
-
- rte_port_kni_writer_flush(port);
- rte_free(port);
-
- return 0;
-}
-
-static int rte_port_kni_writer_stats_read(void *port,
- struct rte_port_out_stats *stats, int clear)
-{
- struct rte_port_kni_writer *p =
- port;
-
- if (stats != NULL)
- memcpy(stats, &p->stats, sizeof(p->stats));
-
- if (clear)
- memset(&p->stats, 0, sizeof(p->stats));
-
- return 0;
-}
-
-/*
- * Port KNI Writer Nodrop
- */
-#ifdef RTE_PORT_STATS_COLLECT
-
-#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val) \
- port->stats.n_pkts_in += val
-#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val) \
- port->stats.n_pkts_drop += val
-
-#else
-
-#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(port, val)
-#define RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_DROP_ADD(port, val)
-
-#endif
-
-struct rte_port_kni_writer_nodrop {
- struct rte_port_out_stats stats;
-
- struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
- uint32_t tx_burst_sz;
- uint32_t tx_buf_count;
- uint64_t bsz_mask;
- uint64_t n_retries;
- struct rte_kni *kni;
-};
-
-static void *
-rte_port_kni_writer_nodrop_create(void *params, int socket_id)
-{
- struct rte_port_kni_writer_nodrop_params *conf =
- params;
- struct rte_port_kni_writer_nodrop *port;
-
- /* Check input parameters */
- if ((conf == NULL) ||
- (conf->tx_burst_sz == 0) ||
- (conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX) ||
- (!rte_is_power_of_2(conf->tx_burst_sz))) {
- RTE_LOG(ERR, PORT, "%s: Invalid input parameters\n", __func__);
- return NULL;
- }
-
- /* Memory allocation */
- port = rte_zmalloc_socket("PORT", sizeof(*port),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Failed to allocate port\n", __func__);
- return NULL;
- }
-
- /* Initialization */
- port->kni = conf->kni;
- port->tx_burst_sz = conf->tx_burst_sz;
- port->tx_buf_count = 0;
- port->bsz_mask = 1LLU << (conf->tx_burst_sz - 1);
-
- /*
- * When n_retries is 0 it means that we should wait for every packet to
- * send no matter how many retries should it take. To limit number of
- * branches in fast path, we use UINT64_MAX instead of branching.
- */
- port->n_retries = (conf->n_retries == 0) ? UINT64_MAX : conf->n_retries;
-
- return port;
-}
-
-static inline void
-send_burst_nodrop(struct rte_port_kni_writer_nodrop *p)
-{
- uint32_t nb_tx = 0, i;
-
- nb_tx = rte_kni_tx_burst(p->kni, p->tx_buf, p->tx_buf_count);
-
- /* We sent all the packets in a first try */
- if (nb_tx >= p->tx_buf_count) {
- p->tx_buf_count = 0;
- return;
- }
-
- for (i = 0; i < p->n_retries; i++) {
- nb_tx += rte_kni_tx_burst(p->kni,
- p->tx_buf + nb_tx,
- p->tx_buf_count - nb_tx);
-
- /* We sent all the packets in more than one try */
- if (nb_tx >= p->tx_buf_count) {
- p->tx_buf_count = 0;
- return;
- }
- }
-
- /* We didn't send the packets in maximum allowed attempts */
- RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
- for ( ; nb_tx < p->tx_buf_count; nb_tx++)
- rte_pktmbuf_free(p->tx_buf[nb_tx]);
-
- p->tx_buf_count = 0;
-}
-
-static int
-rte_port_kni_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
-{
- struct rte_port_kni_writer_nodrop *p =
- port;
-
- p->tx_buf[p->tx_buf_count++] = pkt;
- RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, 1);
- if (p->tx_buf_count >= p->tx_burst_sz)
- send_burst_nodrop(p);
-
- return 0;
-}
-
-static int
-rte_port_kni_writer_nodrop_tx_bulk(void *port,
- struct rte_mbuf **pkts,
- uint64_t pkts_mask)
-{
- struct rte_port_kni_writer_nodrop *p =
- port;
-
- uint64_t bsz_mask = p->bsz_mask;
- uint32_t tx_buf_count = p->tx_buf_count;
- uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
- ((pkts_mask & bsz_mask) ^ bsz_mask);
-
- if (expr == 0) {
- uint64_t n_pkts = __builtin_popcountll(pkts_mask);
- uint32_t n_pkts_ok;
-
- if (tx_buf_count)
- send_burst_nodrop(p);
-
- RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
- n_pkts_ok = rte_kni_tx_burst(p->kni, pkts, n_pkts);
-
- if (n_pkts_ok >= n_pkts)
- return 0;
-
- /*
- * If we didn't manage to send all packets in single burst, move
- * remaining packets to the buffer and call send burst.
- */
- for (; n_pkts_ok < n_pkts; n_pkts_ok++) {
- struct rte_mbuf *pkt = pkts[n_pkts_ok];
- p->tx_buf[p->tx_buf_count++] = pkt;
- }
- send_burst_nodrop(p);
- } else {
- for ( ; pkts_mask; ) {
- uint32_t pkt_index = __builtin_ctzll(pkts_mask);
- uint64_t pkt_mask = 1LLU << pkt_index;
- struct rte_mbuf *pkt = pkts[pkt_index];
-
- p->tx_buf[tx_buf_count++] = pkt;
- RTE_PORT_KNI_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
- pkts_mask &= ~pkt_mask;
- }
-
- p->tx_buf_count = tx_buf_count;
- if (tx_buf_count >= p->tx_burst_sz)
- send_burst_nodrop(p);
- }
-
- return 0;
-}
-
-static int
-rte_port_kni_writer_nodrop_flush(void *port)
-{
- struct rte_port_kni_writer_nodrop *p =
- port;
-
- if (p->tx_buf_count > 0)
- send_burst_nodrop(p);
-
- return 0;
-}
-
-static int
-rte_port_kni_writer_nodrop_free(void *port)
-{
- if (port == NULL) {
- RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
- return -EINVAL;
- }
-
- rte_port_kni_writer_nodrop_flush(port);
- rte_free(port);
-
- return 0;
-}
-
-static int rte_port_kni_writer_nodrop_stats_read(void *port,
- struct rte_port_out_stats *stats, int clear)
-{
- struct rte_port_kni_writer_nodrop *p =
- port;
-
- if (stats != NULL)
- memcpy(stats, &p->stats, sizeof(p->stats));
-
- if (clear)
- memset(&p->stats, 0, sizeof(p->stats));
-
- return 0;
-}
-
-
-/*
- * Summary of port operations
- */
-struct rte_port_in_ops rte_port_kni_reader_ops = {
- .f_create = rte_port_kni_reader_create,
- .f_free = rte_port_kni_reader_free,
- .f_rx = rte_port_kni_reader_rx,
- .f_stats = rte_port_kni_reader_stats_read,
-};
-
-struct rte_port_out_ops rte_port_kni_writer_ops = {
- .f_create = rte_port_kni_writer_create,
- .f_free = rte_port_kni_writer_free,
- .f_tx = rte_port_kni_writer_tx,
- .f_tx_bulk = rte_port_kni_writer_tx_bulk,
- .f_flush = rte_port_kni_writer_flush,
- .f_stats = rte_port_kni_writer_stats_read,
-};
-
-struct rte_port_out_ops rte_port_kni_writer_nodrop_ops = {
- .f_create = rte_port_kni_writer_nodrop_create,
- .f_free = rte_port_kni_writer_nodrop_free,
- .f_tx = rte_port_kni_writer_nodrop_tx,
- .f_tx_bulk = rte_port_kni_writer_nodrop_tx_bulk,
- .f_flush = rte_port_kni_writer_nodrop_flush,
- .f_stats = rte_port_kni_writer_nodrop_stats_read,
-};
diff --git a/lib/port/rte_port_kni.h b/lib/port/rte_port_kni.h
deleted file mode 100644
index 280f58c121e2..000000000000
--- a/lib/port/rte_port_kni.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016 Ethan Zhuang <zhuangwj@gmail.com>.
- * Copyright(c) 2016 Intel Corporation.
- */
-
-#ifndef __INCLUDE_RTE_PORT_KNI_H__
-#define __INCLUDE_RTE_PORT_KNI_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * @file
- * RTE Port KNI Interface
- *
- * kni_reader: input port built on top of pre-initialized KNI interface
- * kni_writer: output port built on top of pre-initialized KNI interface
- */
-
-#include <stdint.h>
-
-#include "rte_port.h"
-
-/** kni_reader port parameters */
-struct rte_port_kni_reader_params {
- /** KNI interface reference */
- struct rte_kni *kni;
-};
-
-/** kni_reader port operations */
-extern struct rte_port_in_ops rte_port_kni_reader_ops;
-
-
-/** kni_writer port parameters */
-struct rte_port_kni_writer_params {
- /** KNI interface reference */
- struct rte_kni *kni;
- /** Burst size to KNI interface. */
- uint32_t tx_burst_sz;
-};
-
-/** kni_writer port operations */
-extern struct rte_port_out_ops rte_port_kni_writer_ops;
-
-/** kni_writer_nodrop port parameters */
-struct rte_port_kni_writer_nodrop_params {
- /** KNI interface reference */
- struct rte_kni *kni;
- /** Burst size to KNI interface. */
- uint32_t tx_burst_sz;
- /** Maximum number of retries, 0 for no limit */
- uint32_t n_retries;
-};
-
-/** kni_writer_nodrop port operations */
-extern struct rte_port_out_ops rte_port_kni_writer_nodrop_ops;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/lib/port/version.map b/lib/port/version.map
index 83dbec7b01d2..fefcf29063f6 100644
--- a/lib/port/version.map
+++ b/lib/port/version.map
@@ -7,9 +7,6 @@ DPDK_24 {
rte_port_fd_reader_ops;
rte_port_fd_writer_nodrop_ops;
rte_port_fd_writer_ops;
- rte_port_kni_reader_ops;
- rte_port_kni_writer_nodrop_ops;
- rte_port_kni_writer_ops;
rte_port_ring_multi_reader_ops;
rte_port_ring_multi_writer_nodrop_ops;
rte_port_ring_multi_writer_ops;
diff --git a/meson_options.txt b/meson_options.txt
index 95e22e0ce70c..621e1ca9ba8c 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -10,7 +10,7 @@ option('disable_apps', type: 'string', value: '', description:
'Comma-separated list of apps to explicitly disable.')
option('disable_drivers', type: 'string', value: '', description:
'Comma-separated list of drivers to explicitly disable.')
-option('disable_libs', type: 'string', value: 'kni', description:
+option('disable_libs', type: 'string', value: '', description:
'Comma-separated list of libraries to explicitly disable. [NOTE: not all libs can be disabled]')
option('drivers_install_subdir', type: 'string', value: 'dpdk/pmds-<VERSION>', description:
'Subdirectory of libdir where to install PMDs. Defaults to using a versioned subdirectory.')
--
2.39.2
^ permalink raw reply [flat|nested] 9+ messages in thread