From: Ciara Loftus <ciara.loftus@intel.com>
To: dev@dpdk.org
Cc: Ciara Loftus <ciara.loftus@intel.com>
Subject: [PATCH 1/2] net/af_xdp: revert use BPF link for XDP programs
Date: Fri, 12 Nov 2021 10:30:01 +0000 [thread overview]
Message-ID: <20211112103002.50380-1-ciara.loftus@intel.com> (raw)
The commit ae70cc6e893b ("net/af_xdp: use BPF link for XDP programs")
caused compilation errors on kernels older than v5.8 due to absence of
the bpf_link_info struct and some definitions in the linux/bpf.h header.
Since relying on the reported kernel version is not a robust solution,
and since there does not appear to be a suitable definition in the bpf
header that the preprocessor could use to detect support for BPF
link, we will take a different approach to solving the issue that the
original patch attempted to solve. The next commit will address this.
Fixes: ae70cc6e893b ("net/af_xdp: use BPF link for XDP programs")
Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
---
drivers/net/af_xdp/compat.h | 124 ----------------------------
drivers/net/af_xdp/meson.build | 7 --
drivers/net/af_xdp/rte_eth_af_xdp.c | 13 ++-
3 files changed, 5 insertions(+), 139 deletions(-)
diff --git a/drivers/net/af_xdp/compat.h b/drivers/net/af_xdp/compat.h
index 1d66f5e318..3880dc7dd7 100644
--- a/drivers/net/af_xdp/compat.h
+++ b/drivers/net/af_xdp/compat.h
@@ -2,7 +2,6 @@
* Copyright(c) 2020 Intel Corporation.
*/
-#include <bpf/bpf.h>
#include <bpf/xsk.h>
#include <linux/version.h>
#include <poll.h>
@@ -55,126 +54,3 @@ tx_syscall_needed(struct xsk_ring_prod *q __rte_unused)
return 1;
}
#endif
-
-#ifdef RTE_LIBRTE_AF_XDP_PMD_BPF_LINK
-static int link_lookup(int ifindex, int *link_fd)
-{
- struct bpf_link_info link_info;
- __u32 link_len;
- __u32 id = 0;
- int err;
- int fd;
-
- while (true) {
- err = bpf_link_get_next_id(id, &id);
- if (err) {
- if (errno == ENOENT) {
- err = 0;
- break;
- }
- break;
- }
-
- fd = bpf_link_get_fd_by_id(id);
- if (fd < 0) {
- if (errno == ENOENT)
- continue;
- err = -errno;
- break;
- }
-
- link_len = sizeof(struct bpf_link_info);
- memset(&link_info, 0, link_len);
- err = bpf_obj_get_info_by_fd(fd, &link_info, &link_len);
- if (err) {
- close(fd);
- break;
- }
- if (link_info.type == BPF_LINK_TYPE_XDP) {
- if ((int)link_info.xdp.ifindex == ifindex) {
- *link_fd = fd;
- break;
- }
- }
- close(fd);
- }
-
- return err;
-}
-
-static bool probe_bpf_link(void)
-{
- DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
- .flags = XDP_FLAGS_SKB_MODE);
- struct bpf_load_program_attr prog_attr;
- struct bpf_insn insns[2];
- int prog_fd, link_fd = -1;
- int ifindex_lo = 1;
- bool ret = false;
- int err;
-
- err = link_lookup(ifindex_lo, &link_fd);
- if (err)
- return ret;
-
- if (link_fd >= 0)
- return true;
-
- /* BPF_MOV64_IMM(BPF_REG_0, XDP_PASS), */
- insns[0].code = BPF_ALU64 | BPF_MOV | BPF_K;
- insns[0].dst_reg = BPF_REG_0;
- insns[0].imm = XDP_PASS;
-
- /* BPF_EXIT_INSN() */
- insns[1].code = BPF_JMP | BPF_EXIT;
-
- memset(&prog_attr, 0, sizeof(prog_attr));
- prog_attr.prog_type = BPF_PROG_TYPE_XDP;
- prog_attr.insns = insns;
- prog_attr.insns_cnt = RTE_DIM(insns);
- prog_attr.license = "GPL";
-
- prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0);
- if (prog_fd < 0)
- return ret;
-
- link_fd = bpf_link_create(prog_fd, ifindex_lo, BPF_XDP, &opts);
- close(prog_fd);
-
- if (link_fd >= 0) {
- ret = true;
- close(link_fd);
- }
-
- return ret;
-}
-
-static int link_xdp_program(int if_index, int prog_fd, bool use_bpf_link)
-{
- DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
- int link_fd, ret = 0;
-
- if (!use_bpf_link)
- return bpf_set_link_xdp_fd(if_index, prog_fd,
- XDP_FLAGS_UPDATE_IF_NOEXIST);
-
- opts.flags = 0;
- link_fd = bpf_link_create(prog_fd, if_index, BPF_XDP, &opts);
- if (link_fd < 0)
- ret = -1;
-
- return ret;
-}
-#else
-static bool probe_bpf_link(void)
-{
- return false;
-}
-
-static int link_xdp_program(int if_index, int prog_fd,
- bool use_bpf_link __rte_unused)
-{
- return bpf_set_link_xdp_fd(if_index, prog_fd,
- XDP_FLAGS_UPDATE_IF_NOEXIST);
-}
-#endif
diff --git a/drivers/net/af_xdp/meson.build b/drivers/net/af_xdp/meson.build
index 42dc6d69ac..3ed2b29784 100644
--- a/drivers/net/af_xdp/meson.build
+++ b/drivers/net/af_xdp/meson.build
@@ -16,18 +16,11 @@ endif
if bpf_dep.found() and cc.has_header('bpf/xsk.h') and cc.has_header('linux/if_xdp.h')
ext_deps += bpf_dep
- # check for libbpf shared umem APIs
bpf_ver_dep = dependency('libbpf', version : '>=0.2.0',
required: false, method: 'pkg-config')
if bpf_ver_dep.found()
dpdk_conf.set('RTE_LIBRTE_AF_XDP_PMD_SHARED_UMEM', 1)
endif
- # check for libbpf bpf link support
- bpf_link_dep = dependency('libbpf', version : '>=0.4.0',
- required: false, method: 'pkg-config')
- if bpf_link_dep.found()
- dpdk_conf.set('RTE_LIBRTE_AF_XDP_PMD_BPF_LINK', 1)
- endif
else
build = false
reason = 'missing dependency, "libbpf"'
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 917e0bb453..1c7689c9b4 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -139,7 +139,6 @@ struct pmd_internals {
bool shared_umem;
char prog_path[PATH_MAX];
bool custom_prog_configured;
- bool use_bpf_link;
struct rte_ether_addr eth_addr;
@@ -973,8 +972,7 @@ eth_dev_close(struct rte_eth_dev *dev)
*/
dev->data->mac_addrs = NULL;
- if (!internals->use_bpf_link)
- remove_xdp_program(internals);
+ remove_xdp_program(internals);
if (internals->shared_umem) {
struct internal_list *list;
@@ -1149,7 +1147,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
}
static int
-load_custom_xdp_prog(const char *prog_path, int if_index, bool use_bpf_link)
+load_custom_xdp_prog(const char *prog_path, int if_index)
{
int ret, prog_fd = -1;
struct bpf_object *obj;
@@ -1173,7 +1171,8 @@ load_custom_xdp_prog(const char *prog_path, int if_index, bool use_bpf_link)
}
/* Link the program with the given network device */
- ret = link_xdp_program(if_index, prog_fd, use_bpf_link);
+ ret = bpf_set_link_xdp_fd(if_index, prog_fd,
+ XDP_FLAGS_UPDATE_IF_NOEXIST);
if (ret) {
AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n",
prog_fd);
@@ -1273,8 +1272,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
if (strnlen(internals->prog_path, PATH_MAX) &&
!internals->custom_prog_configured) {
ret = load_custom_xdp_prog(internals->prog_path,
- internals->if_index,
- internals->use_bpf_link);
+ internals->if_index);
if (ret) {
AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
internals->prog_path);
@@ -1691,7 +1689,6 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
strlcpy(internals->if_name, if_name, IFNAMSIZ);
strlcpy(internals->prog_path, prog_path, PATH_MAX);
internals->custom_prog_configured = 0;
- internals->use_bpf_link = probe_bpf_link();
#ifndef ETH_AF_XDP_SHARED_UMEM
if (shared_umem) {
--
2.17.1
next reply other threads:[~2021-11-12 10:30 UTC|newest]
Thread overview: 3+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-11-12 10:30 Ciara Loftus [this message]
2021-11-12 10:30 ` [PATCH 2/2] net/af_xdp: workaround custom program loading issue Ciara Loftus
2021-11-15 17:08 ` [PATCH 1/2] net/af_xdp: revert use BPF link for XDP programs Ferruh Yigit
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20211112103002.50380-1-ciara.loftus@intel.com \
--to=ciara.loftus@intel.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).