DPDK patches and discussions
* [dpdk-dev] [PATCH v3] net/af_xdp: custom XDP program loading
@ 2020-09-25  9:20 Ciara Loftus
  2020-09-30 15:20 ` Ferruh Yigit
  0 siblings, 1 reply; 2+ messages in thread
From: Ciara Loftus @ 2020-09-25  9:20 UTC (permalink / raw)
  To: dev; +Cc: Ciara Loftus

The new 'xdp_prog=<string>' vdev arg allows the user to specify the path
to a custom XDP program to be set on the device, instead of the default
program loaded by libbpf. The program must contain an XSKMAP named
'xsks_map', which allows packets matching criteria defined in the program
to be redirected to userspace and thus to the PMD. This is useful for
filtering, for example when only a subset of packets should reach
userspace, or when a subset should be dropped or processed in the kernel.
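
For illustration only (not part of this patch), a minimal sketch of such
a program, assuming the usual clang/libbpf BPF toolchain. The XSKMAP must
be named 'xsks_map' as described above; the map size, program name and
the UDP-keep filter are purely hypothetical examples of such criteria.

    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <linux/ip.h>
    #include <linux/in.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    /* The PMD looks up an XSKMAP with exactly this name. */
    struct bpf_map_def SEC("maps") xsks_map = {
            .type = BPF_MAP_TYPE_XSKMAP,
            .key_size = sizeof(int),
            .value_size = sizeof(int),
            .max_entries = 64, /* >= number of queues in use */
    };

    SEC("xdp")
    int xdp_filter_prog(struct xdp_md *ctx)
    {
            void *data = (void *)(long)ctx->data;
            void *data_end = (void *)(long)ctx->data_end;
            struct ethhdr *eth = data;
            struct iphdr *ip = (struct iphdr *)(eth + 1);

            /* Bounds check required by the verifier. */
            if ((void *)(ip + 1) > data_end)
                    return XDP_PASS;

            /* Example criterion: keep UDP traffic in the kernel stack. */
            if (eth->h_proto == bpf_htons(ETH_P_IP) &&
                ip->protocol == IPPROTO_UDP)
                    return XDP_PASS;

            /* Everything else goes to the AF_XDP socket on this queue. */
            return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, 0);
    }

    char _license[] SEC("license") = "GPL";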

Note: a netdev may only load one program.
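
The path to the compiled object is passed at probe time with the usual
vdev syntax, for example (illustrative only; the interface name and
object path below are placeholders):

    --vdev net_af_xdp,iface=ens786f1,xdp_prog=/path/to/xdp_prog.o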

Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
Tested-by: Xuekun Hu <xuekun.hu@intel.com>
---
v3:
* Rebased to next-net

v2:
* Modified error checking for strnlen return.
* Fixed copyright header edits

 doc/guides/nics/af_xdp.rst          |  1 +
 drivers/net/af_xdp/rte_eth_af_xdp.c | 99 +++++++++++++++++++++++++++--
 2 files changed, 95 insertions(+), 5 deletions(-)

diff --git a/doc/guides/nics/af_xdp.rst b/doc/guides/nics/af_xdp.rst
index 78a088468f..be268fe7ff 100644
--- a/doc/guides/nics/af_xdp.rst
+++ b/doc/guides/nics/af_xdp.rst
@@ -34,6 +34,7 @@ The following options can be provided to set up an af_xdp port in DPDK.
 *   ``queue_count`` - total netdev queue number (optional, default 1);
 *   ``shared_umem`` - PMD will attempt to share UMEM with others (optional,
     default 0);
+*   ``xdp_prog`` - path to custom xdp program (optional, default none);
 
 Prerequisites
 -------------
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 01f462b465..8dfdadb3c2 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -126,6 +126,8 @@ struct pmd_internals {
 	int max_queue_cnt;
 	int combined_queue_cnt;
 	bool shared_umem;
+	char prog_path[PATH_MAX];
+	bool custom_prog_configured;
 
 	struct rte_ether_addr eth_addr;
 
@@ -137,12 +139,14 @@ struct pmd_internals {
 #define ETH_AF_XDP_START_QUEUE_ARG		"start_queue"
 #define ETH_AF_XDP_QUEUE_COUNT_ARG		"queue_count"
 #define ETH_AF_XDP_SHARED_UMEM_ARG		"shared_umem"
+#define ETH_AF_XDP_PROG_ARG			"xdp_prog"
 
 static const char * const valid_arguments[] = {
 	ETH_AF_XDP_IFACE_ARG,
 	ETH_AF_XDP_START_QUEUE_ARG,
 	ETH_AF_XDP_QUEUE_COUNT_ARG,
 	ETH_AF_XDP_SHARED_UMEM_ARG,
+	ETH_AF_XDP_PROG_ARG,
 	NULL
 };
 
@@ -1021,6 +1025,45 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
 	return NULL;
 }
 
+static int
+load_custom_xdp_prog(const char *prog_path, int if_index)
+{
+	int ret, prog_fd = -1;
+	struct bpf_object *obj;
+	struct bpf_map *map;
+
+	ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+	if (ret) {
+		AF_XDP_LOG(ERR, "Failed to load program %s\n", prog_path);
+		return ret;
+	}
+
+	/*
+	 * The loaded program must provision for a map of xsks, such that some
+	 * traffic can be redirected to userspace. When the xsk is created,
+	 * libbpf inserts it into the map.
+	 */
+	map = bpf_object__find_map_by_name(obj, "xsks_map");
+	if (!map) {
+		AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path);
+		return -1;
+	}
+
+	/* Link the program with the given network device */
+	ret = bpf_set_link_xdp_fd(if_index, prog_fd,
+					XDP_FLAGS_UPDATE_IF_NOEXIST);
+	if (ret) {
+		AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n",
+				prog_fd);
+		return -1;
+	}
+
+	AF_XDP_LOG(INFO, "Successfully loaded XDP program %s with fd %d\n",
+				prog_path, prog_fd);
+
+	return 0;
+}
+
 static int
 xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
 	      int ring_size)
@@ -1046,6 +1089,18 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
 	cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
 #endif
 
+	if (strnlen(internals->prog_path, PATH_MAX) &&
+				!internals->custom_prog_configured) {
+		ret = load_custom_xdp_prog(internals->prog_path,
+					   internals->if_index);
+		if (ret) {
+			AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
+					internals->prog_path);
+			goto err;
+		}
+		internals->custom_prog_configured = 1;
+	}
+
 	if (internals->shared_umem)
 		ret = create_shared_socket(&rxq->xsk, internals->if_name,
 				rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
@@ -1264,6 +1319,30 @@ parse_name_arg(const char *key __rte_unused,
 	return 0;
 }
 
+/** parse xdp prog argument */
+static int
+parse_prog_arg(const char *key __rte_unused,
+	       const char *value, void *extra_args)
+{
+	char *path = extra_args;
+
+	if (strnlen(value, PATH_MAX) == PATH_MAX) {
+		AF_XDP_LOG(ERR, "Invalid path %s, should be less than %u bytes.\n",
+			   value, PATH_MAX);
+		return -EINVAL;
+	}
+
+	if (access(value, F_OK) != 0) {
+		AF_XDP_LOG(ERR, "Error accessing %s: %s\n",
+			   value, strerror(errno));
+		return -EINVAL;
+	}
+
+	strlcpy(path, value, PATH_MAX);
+
+	return 0;
+}
+
 static int
 xdp_get_channels_info(const char *if_name, int *max_queues,
 				int *combined_queues)
@@ -1307,7 +1386,7 @@ xdp_get_channels_info(const char *if_name, int *max_queues,
 
 static int
 parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
-			int *queue_cnt, int *shared_umem)
+			int *queue_cnt, int *shared_umem, char *prog_path)
 {
 	int ret;
 
@@ -1333,6 +1412,11 @@ parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
 	if (ret < 0)
 		goto free_kvlist;
 
+	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PROG_ARG,
+				 &parse_prog_arg, prog_path);
+	if (ret < 0)
+		goto free_kvlist;
+
 free_kvlist:
 	rte_kvargs_free(kvlist);
 	return ret;
@@ -1370,7 +1454,8 @@ get_iface_info(const char *if_name,
 
 static struct rte_eth_dev *
 init_internals(struct rte_vdev_device *dev, const char *if_name,
-			int start_queue_idx, int queue_cnt, int shared_umem)
+		int start_queue_idx, int queue_cnt, int shared_umem,
+		const char *prog_path)
 {
 	const char *name = rte_vdev_device_name(dev);
 	const unsigned int numa_node = dev->device.numa_node;
@@ -1386,6 +1471,8 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
 	internals->start_queue_idx = start_queue_idx;
 	internals->queue_cnt = queue_cnt;
 	strlcpy(internals->if_name, if_name, IFNAMSIZ);
+	strlcpy(internals->prog_path, prog_path, PATH_MAX);
+	internals->custom_prog_configured = 0;
 
 #ifndef ETH_AF_XDP_SHARED_UMEM
 	if (shared_umem) {
@@ -1472,6 +1559,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
 	int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
 	int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
 	int shared_umem = 0;
+	char prog_path[PATH_MAX] = {'\0'};
 	struct rte_eth_dev *eth_dev = NULL;
 	const char *name;
 
@@ -1501,7 +1589,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
 		dev->device.numa_node = rte_socket_id();
 
 	if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
-			     &xsk_queue_cnt, &shared_umem) < 0) {
+			     &xsk_queue_cnt, &shared_umem, prog_path) < 0) {
 		AF_XDP_LOG(ERR, "Invalid kvargs value\n");
 		return -EINVAL;
 	}
@@ -1512,7 +1600,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
 	}
 
 	eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
-					xsk_queue_cnt, shared_umem);
+					xsk_queue_cnt, shared_umem, prog_path);
 	if (eth_dev == NULL) {
 		AF_XDP_LOG(ERR, "Failed to init internals\n");
 		return -1;
@@ -1556,4 +1644,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
 			      "iface=<string> "
 			      "start_queue=<int> "
 			      "queue_count=<int> "
-			      "shared_umem=<int> ");
+			      "shared_umem=<int> "
+			      "xdp_prog=<string> ");
-- 
2.17.1



* Re: [dpdk-dev] [PATCH v3] net/af_xdp: custom XDP program loading
  2020-09-25  9:20 [dpdk-dev] [PATCH v3] net/af_xdp: custom XDP program loading Ciara Loftus
@ 2020-09-30 15:20 ` Ferruh Yigit
  0 siblings, 0 replies; 2+ messages in thread
From: Ferruh Yigit @ 2020-09-30 15:20 UTC (permalink / raw)
  To: Ciara Loftus, dev

On 9/25/2020 10:20 AM, Ciara Loftus wrote:
> The new 'xdp_prog=<string>' vdev arg allows the user to specify the path
> to a custom XDP program to be set on the device, instead of the default
> program loaded by libbpf. The program must contain an XSKMAP named
> 'xsks_map', which allows packets matching criteria defined in the program
> to be redirected to userspace and thus to the PMD. This is useful for
> filtering, for example when only a subset of packets should reach
> userspace, or when a subset should be dropped or processed in the kernel.
> 
> Note: a netdev may only load one program.
> 
> Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
> Tested-by: Xuekun Hu <xuekun.hu@intel.com>

Applied to dpdk-next-net/main, thanks.


