DPDK patches and discussions
* [dpdk-dev] [PATCH 0/3] bpf: extend validation of input BPF programs
@ 2018-06-08  8:42 Konstantin Ananyev
  2018-06-08  8:42 ` [dpdk-dev] [PATCH 1/3] bpf: add extra information for external symbol definitions Konstantin Ananyev
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Konstantin Ananyev @ 2018-06-08  8:42 UTC (permalink / raw)
  To: dev, dev; +Cc: Konstantin Ananyev

As a first step, extend struct rte_bpf_xsym with new fields to provide
extra information about the allowed external variables and helper functions.
That allows BPF validation to be extended with new checks for:
 - use or return of uninitialized registers and/or stack data
 - possible memory access boundary violations
 - invalid arguments passed to an external function
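
As an illustration, the first of these checks will reject a program like
the sketch below, which returns the value of the never-initialized
register R2 (illustrative only, not part of this series):

	static const struct ebpf_insn bad_prog[] = {
		{
			/* R0 = R2, but R2 was never written to */
			.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
			.dst_reg = EBPF_REG_0,
			.src_reg = EBPF_REG_2,
		},
		{
			.code = (BPF_JMP | EBPF_EXIT),
		},
	};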

Konstantin Ananyev (3):
  bpf: add extra information for external symbol definitions
  bpf: add extra validation for input BPF program
  test/bpf: add new test-case for external function call

 app/test-pmd/bpf_cmd.c        |   27 +-
 lib/librte_bpf/bpf_def.h      |    5 +
 lib/librte_bpf/bpf_exec.c     |    2 +-
 lib/librte_bpf/bpf_impl.h     |   14 +
 lib/librte_bpf/bpf_jit_x86.c  |   17 +-
 lib/librte_bpf/bpf_load.c     |   49 +-
 lib/librte_bpf/bpf_load_elf.c |    4 +-
 lib/librte_bpf/bpf_validate.c | 1136 +++++++++++++++++++++++++++++++++++++++--
 lib/librte_bpf/rte_bpf.h      |   21 +-
 test/test/test_bpf.c          |  169 +++++-
 10 files changed, 1382 insertions(+), 62 deletions(-)

-- 
2.13.6

* [dpdk-dev] [PATCH 1/3] bpf: add extra information for external symbol definitions
  2018-06-08  8:42 [dpdk-dev] [PATCH 0/3] bpf: extend validation of input BPF programs Konstantin Ananyev
@ 2018-06-08  8:42 ` Konstantin Ananyev
  2018-06-08  8:42 ` [dpdk-dev] [PATCH 2/3] bpf: add extra validation for input BPF program Konstantin Ananyev
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Konstantin Ananyev @ 2018-06-08  8:42 UTC (permalink / raw)
  To: dev, dev; +Cc: Konstantin Ananyev

Extend struct rte_bpf_xsym with new fields that provide information about:
 - for variables: their type and size
 - for functions: the number of arguments, plus the type/size of each
   argument and of the return value

Such information allows the validation code to perform more extensive
checks on the input BPF program and catch misbehaving BPF code.

Note that this change causes an ABI/API breakage for librte_bpf.
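
For example, a hypothetical external variable (my_var is purely
illustrative) would now be described as:

	/* illustrative sketch, not part of this patch */
	static uint64_t my_var;

	static const struct rte_bpf_xsym xsym = {
		.name = RTE_STR(my_var),
		.type = RTE_BPF_XTYPE_VAR,
		.var = {
			.val = &my_var,
			.desc = {
				.type = RTE_BPF_ARG_PTR,
				.size = sizeof(my_var),
			},
		},
	};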

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 app/test-pmd/bpf_cmd.c        | 27 ++++++++++++++++++++++--
 lib/librte_bpf/bpf_def.h      |  5 +++++
 lib/librte_bpf/bpf_exec.c     |  2 +-
 lib/librte_bpf/bpf_impl.h     | 14 +++++++++++++
 lib/librte_bpf/bpf_jit_x86.c  | 17 ++-------------
 lib/librte_bpf/bpf_load.c     | 49 ++++++++++++++++++++++++++++++++++++++++++-
 lib/librte_bpf/bpf_load_elf.c |  4 ++--
 lib/librte_bpf/rte_bpf.h      | 21 +++++++++++++++----
 test/test/test_bpf.c          | 19 ++++++++++++++++-
 9 files changed, 132 insertions(+), 26 deletions(-)

diff --git a/app/test-pmd/bpf_cmd.c b/app/test-pmd/bpf_cmd.c
index 584fad908..830bfc13a 100644
--- a/app/test-pmd/bpf_cmd.c
+++ b/app/test-pmd/bpf_cmd.c
@@ -19,12 +19,35 @@ static const struct rte_bpf_xsym bpf_xsym[] = {
 	{
 		.name = RTE_STR(stdout),
 		.type = RTE_BPF_XTYPE_VAR,
-		.var = &stdout,
+		.var = {
+			.val = &stdout,
+			.desc = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(stdout),
+			},
+		},
 	},
 	{
 		.name = RTE_STR(rte_pktmbuf_dump),
 		.type = RTE_BPF_XTYPE_FUNC,
-		.func = (void *)rte_pktmbuf_dump,
+		.func = {
+			.val = (void *)rte_pktmbuf_dump,
+			.nb_args = 3,
+			.args = {
+				[0] = {
+					.type = RTE_BPF_ARG_RAW,
+					.size = sizeof(uintptr_t),
+				},
+				[1] = {
+					.type = RTE_BPF_ARG_PTR_MBUF,
+					.size = sizeof(struct rte_mbuf),
+				},
+				[2] = {
+					.type = RTE_BPF_ARG_RAW,
+					.size = sizeof(uint32_t),
+				},
+			},
+		},
 	},
 };
 
diff --git a/lib/librte_bpf/bpf_def.h b/lib/librte_bpf/bpf_def.h
index 6b69de345..c10f3aec4 100644
--- a/lib/librte_bpf/bpf_def.h
+++ b/lib/librte_bpf/bpf_def.h
@@ -131,6 +131,11 @@ struct ebpf_insn {
 	int32_t imm;
 };
 
+/*
+ * eBPF passes function arguments in registers R1-R5,
+ * i.e. a function can have at most 5 arguments.
+ */
+#define	EBPF_FUNC_MAX_ARGS	(EBPF_REG_6 - EBPF_REG_1)
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_bpf/bpf_exec.c b/lib/librte_bpf/bpf_exec.c
index e373b1f3d..6a79139c0 100644
--- a/lib/librte_bpf/bpf_exec.c
+++ b/lib/librte_bpf/bpf_exec.c
@@ -402,7 +402,7 @@ bpf_exec(const struct rte_bpf *bpf, uint64_t reg[EBPF_REG_NUM])
 			break;
 		/* call instructions */
 		case (BPF_JMP | EBPF_CALL):
-			reg[EBPF_REG_0] = bpf->prm.xsym[ins->imm].func(
+			reg[EBPF_REG_0] = bpf->prm.xsym[ins->imm].func.val(
 				reg[EBPF_REG_1], reg[EBPF_REG_2],
 				reg[EBPF_REG_3], reg[EBPF_REG_4],
 				reg[EBPF_REG_5]);
diff --git a/lib/librte_bpf/bpf_impl.h b/lib/librte_bpf/bpf_impl.h
index 5d7e65c31..b577e2cbe 100644
--- a/lib/librte_bpf/bpf_impl.h
+++ b/lib/librte_bpf/bpf_impl.h
@@ -34,6 +34,20 @@ extern int rte_bpf_logtype;
 #define	RTE_BPF_LOG(lvl, fmt, args...) \
 	rte_log(RTE_LOG_## lvl, rte_bpf_logtype, fmt, ##args)
 
+static inline size_t
+bpf_size(uint32_t bpf_op_sz)
+{
+	if (bpf_op_sz == BPF_B)
+		return sizeof(uint8_t);
+	else if (bpf_op_sz == BPF_H)
+		return sizeof(uint16_t);
+	else if (bpf_op_sz == BPF_W)
+		return sizeof(uint32_t);
+	else if (bpf_op_sz == EBPF_DW)
+		return sizeof(uint64_t);
+	return 0;
+}
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_bpf/bpf_jit_x86.c b/lib/librte_bpf/bpf_jit_x86.c
index 111e028d2..68ea389f2 100644
--- a/lib/librte_bpf/bpf_jit_x86.c
+++ b/lib/librte_bpf/bpf_jit_x86.c
@@ -113,20 +113,6 @@ union bpf_jit_imm {
 	uint8_t u8[4];
 };
 
-static size_t
-bpf_size(uint32_t bpf_op_sz)
-{
-	if (bpf_op_sz == BPF_B)
-		return sizeof(uint8_t);
-	else if (bpf_op_sz == BPF_H)
-		return sizeof(uint16_t);
-	else if (bpf_op_sz == BPF_W)
-		return sizeof(uint32_t);
-	else if (bpf_op_sz == EBPF_DW)
-		return sizeof(uint64_t);
-	return 0;
-}
-
 /*
  * In many cases for imm8 we can produce shorter code.
  */
@@ -1294,7 +1280,8 @@ emit(struct bpf_jit_state *st, const struct rte_bpf *bpf)
 			break;
 		/* call instructions */
 		case (BPF_JMP | EBPF_CALL):
-			emit_call(st, (uintptr_t)bpf->prm.xsym[ins->imm].func);
+			emit_call(st,
+				(uintptr_t)bpf->prm.xsym[ins->imm].func.val);
 			break;
 		/* return instruction */
 		case (BPF_JMP | EBPF_EXIT):
diff --git a/lib/librte_bpf/bpf_load.c b/lib/librte_bpf/bpf_load.c
index d1c9abd7f..2b84fe724 100644
--- a/lib/librte_bpf/bpf_load.c
+++ b/lib/librte_bpf/bpf_load.c
@@ -51,17 +51,64 @@ bpf_load(const struct rte_bpf_prm *prm)
 	return bpf;
 }
 
+/*
+ * Check that the user-provided external symbol is correctly defined.
+ */
+static int
+bpf_check_xsym(const struct rte_bpf_xsym *xsym)
+{
+	uint32_t i;
+
+	if (xsym->name == NULL)
+		return -EINVAL;
+
+	if (xsym->type == RTE_BPF_XTYPE_VAR) {
+		if (xsym->var.desc.type == RTE_BPF_ARG_UNDEF)
+			return -EINVAL;
+	} else if (xsym->type == RTE_BPF_XTYPE_FUNC) {
+
+		if (xsym->func.nb_args > EBPF_FUNC_MAX_ARGS)
+			return -EINVAL;
+
+		/* check function arguments */
+		for (i = 0; i != xsym->func.nb_args; i++) {
+			if (xsym->func.args[i].type == RTE_BPF_ARG_UNDEF)
+				return -EINVAL;
+		}
+
+		/* check return value info */
+		if (xsym->func.ret.type != RTE_BPF_ARG_UNDEF &&
+				xsym->func.ret.size == 0)
+			return -EINVAL;
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+
 __rte_experimental struct rte_bpf *
 rte_bpf_load(const struct rte_bpf_prm *prm)
 {
 	struct rte_bpf *bpf;
 	int32_t rc;
+	uint32_t i;
 
-	if (prm == NULL || prm->ins == NULL) {
+	if (prm == NULL || prm->ins == NULL ||
+			(prm->nb_xsym != 0 && prm->xsym == NULL)) {
 		rte_errno = EINVAL;
 		return NULL;
 	}
 
+	rc = 0;
+	for (i = 0; i != prm->nb_xsym && rc == 0; i++)
+		rc = bpf_check_xsym(prm->xsym + i);
+
+	if (rc != 0) {
+		rte_errno = -rc;
+		RTE_BPF_LOG(ERR, "%s: %d-th xsym is invalid\n", __func__, i);
+		return NULL;
+	}
+
 	bpf = bpf_load(prm);
 	if (bpf == NULL) {
 		rte_errno = ENOMEM;
diff --git a/lib/librte_bpf/bpf_load_elf.c b/lib/librte_bpf/bpf_load_elf.c
index 6ab03d86e..96d3630fe 100644
--- a/lib/librte_bpf/bpf_load_elf.c
+++ b/lib/librte_bpf/bpf_load_elf.c
@@ -81,9 +81,9 @@ resolve_xsym(const char *sn, size_t ofs, struct ebpf_insn *ins, size_t ins_sz,
 		ins[idx].imm = fidx;
 	/* for variable we need to store its absolute address */
 	else {
-		ins[idx].imm = (uintptr_t)prm->xsym[fidx].var;
+		ins[idx].imm = (uintptr_t)prm->xsym[fidx].var.val;
 		ins[idx + 1].imm =
-			(uint64_t)(uintptr_t)prm->xsym[fidx].var >> 32;
+			(uint64_t)(uintptr_t)prm->xsym[fidx].var.val >> 32;
 	}
 
 	return 0;
diff --git a/lib/librte_bpf/rte_bpf.h b/lib/librte_bpf/rte_bpf.h
index 1249a992c..ad62ef2c6 100644
--- a/lib/librte_bpf/rte_bpf.h
+++ b/lib/librte_bpf/rte_bpf.h
@@ -40,7 +40,11 @@ enum rte_bpf_arg_type {
  */
 struct rte_bpf_arg {
 	enum rte_bpf_arg_type type;
-	size_t size;     /**< for pointer types, size of data it points to */
+	/**
+	 * for ptr type - max size of data buffer it points to
+	 * for raw type - the size (in bytes) of the value
+	 */
+	size_t size;
 	size_t buf_size;
 	/**< for mbuf ptr type, max size of rte_mbuf data buffer */
 };
@@ -66,10 +70,19 @@ struct rte_bpf_xsym {
 	const char *name;        /**< name */
 	enum rte_bpf_xtype type; /**< type */
 	union {
-		uint64_t (*func)(uint64_t, uint64_t, uint64_t,
+		struct {
+			uint64_t (*val)(uint64_t, uint64_t, uint64_t,
 				uint64_t, uint64_t);
-		void *var;
-	}; /**< value */
+			uint32_t nb_args;
+			struct rte_bpf_arg args[EBPF_FUNC_MAX_ARGS];
+			/**< Function arguments descriptions. */
+			struct rte_bpf_arg ret; /**< function return value. */
+		} func;
+		struct {
+			void *val; /**< actual memory location */
+			struct rte_bpf_arg desc; /**< type, size, etc. */
+		} var; /**< external variable */
+	};
 };
 
 /**
diff --git a/test/test/test_bpf.c b/test/test/test_bpf.c
index cbd6be63d..1e9caef95 100644
--- a/test/test/test_bpf.c
+++ b/test/test/test_bpf.c
@@ -1530,7 +1530,24 @@ static const struct rte_bpf_xsym test_call1_xsym[] = {
 	{
 		.name = RTE_STR(dummy_func1),
 		.type = RTE_BPF_XTYPE_FUNC,
-		.func = (void *)dummy_func1,
+		.func = {
+			.val = (void *)dummy_func1,
+			.nb_args = 3,
+			.args = {
+				[0] = {
+					.type = RTE_BPF_ARG_PTR,
+					.size = sizeof(struct dummy_offset),
+				},
+				[1] = {
+					.type = RTE_BPF_ARG_PTR,
+					.size = sizeof(uint32_t),
+				},
+				[2] = {
+					.type = RTE_BPF_ARG_PTR,
+					.size = sizeof(uint64_t),
+				},
+			},
+		},
 	},
 };
 
-- 
2.13.6

* [dpdk-dev] [PATCH 2/3] bpf: add extra validation for input BPF program
  2018-06-08  8:42 [dpdk-dev] [PATCH 0/3] bpf: extend validation of input BPF programs Konstantin Ananyev
  2018-06-08  8:42 ` [dpdk-dev] [PATCH 1/3] bpf: add extra information for external symbol definitions Konstantin Ananyev
@ 2018-06-08  8:42 ` Konstantin Ananyev
  2018-06-08  8:42 ` [dpdk-dev] [PATCH 3/3] test/bpf: add new test-case for external function call Konstantin Ananyev
  2018-07-12  7:46 ` [dpdk-dev] [PATCH 0/3] bpf: extend validation of input BPF programs Thomas Monjalon
  3 siblings, 0 replies; 5+ messages in thread
From: Konstantin Ananyev @ 2018-06-08  8:42 UTC (permalink / raw)
  To: dev, dev; +Cc: Konstantin Ananyev

Add checks for:
 - use or return of uninitialized registers and/or stack data
 - possible memory access boundary violations
 - invalid arguments passed to an external function
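
The checks are based on interval tracking: every register carries signed
and unsigned [min, max] bounds that each instruction narrows or widens.
A simplified sketch of the idea for unsigned addition (illustrative only;
the patch code below also tracks signed bounds and a bit mask):

	struct ubounds {
		uint64_t min;
		uint64_t max;
	};

	/* widen the bounds for dst += src, under operand mask msk */
	static void
	ubounds_add(struct ubounds *dst, const struct ubounds *src,
		uint64_t msk)
	{
		uint64_t lo = (dst->min + src->min) & msk;
		uint64_t hi = (dst->max + src->max) & msk;

		/* on a possible wrap-around fall back to the full range */
		if (hi < dst->max || lo < dst->min) {
			dst->min = 0;
			dst->max = msk;
		} else {
			dst->min = lo;
			dst->max = hi;
		}
	}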

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 lib/librte_bpf/bpf_validate.c | 1136 +++++++++++++++++++++++++++++++++++++++--
 1 file changed, 1100 insertions(+), 36 deletions(-)

diff --git a/lib/librte_bpf/bpf_validate.c b/lib/librte_bpf/bpf_validate.c
index b7081c853..83983efc4 100644
--- a/lib/librte_bpf/bpf_validate.c
+++ b/lib/librte_bpf/bpf_validate.c
@@ -11,9 +11,28 @@
 
 #include <rte_common.h>
 #include <rte_eal.h>
+#include <rte_byteorder.h>
 
 #include "bpf_impl.h"
 
+struct bpf_reg_val {
+	struct rte_bpf_arg v;
+	uint64_t mask;
+	struct {
+		int64_t min;
+		int64_t max;
+	} s;
+	struct {
+		uint64_t min;
+		uint64_t max;
+	} u;
+};
+
+struct bpf_eval_state {
+	struct bpf_reg_val rv[EBPF_REG_NUM];
+	struct bpf_reg_val sv[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];
+};
+
 /* possible instruction node colour */
 enum {
 	WHITE,
@@ -31,14 +50,6 @@ enum {
 	MAX_EDGE_TYPE
 };
 
-struct bpf_reg_state {
-	uint64_t val;
-};
-
-struct bpf_eval_state {
-	struct bpf_reg_state rs[EBPF_REG_NUM];
-};
-
 #define	MAX_EDGES	2
 
 struct inst_node {
@@ -54,12 +65,13 @@ struct inst_node {
 struct bpf_verifier {
 	const struct rte_bpf_prm *prm;
 	struct inst_node *in;
-	int32_t stack_sz;
+	uint64_t stack_sz;
 	uint32_t nb_nodes;
 	uint32_t nb_jcc_nodes;
 	uint32_t node_colour[MAX_NODE_COLOUR];
 	uint32_t edge_type[MAX_EDGE_TYPE];
 	struct bpf_eval_state *evst;
+	struct inst_node *evin;
 	struct {
 		uint32_t num;
 		uint32_t cur;
@@ -101,40 +113,823 @@ check_alu_bele(const struct ebpf_insn *ins)
 }
 
 static const char *
-eval_stack(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+eval_exit(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+{
+	RTE_SET_USED(ins);
+	if (bvf->evst->rv[EBPF_REG_0].v.type == RTE_BPF_ARG_UNDEF)
+		return "undefined return value";
+	return NULL;
+}
+
+/* set up the maximum possible bounds for the given mask */
+static void
+eval_umax_bound(struct bpf_reg_val *rv, uint64_t mask)
+{
+	rv->u.max = mask;
+	rv->u.min = 0;
+}
+
+static void
+eval_smax_bound(struct bpf_reg_val *rv, uint64_t mask)
+{
+	rv->s.max = mask >> 1;
+	rv->s.min = rv->s.max ^ UINT64_MAX;
+}
+
+static void
+eval_max_bound(struct bpf_reg_val *rv, uint64_t mask)
+{
+	eval_umax_bound(rv, mask);
+	eval_smax_bound(rv, mask);
+}
+
+static void
+eval_fill_max_bound(struct bpf_reg_val *rv, uint64_t mask)
+{
+	eval_max_bound(rv, mask);
+	rv->v.type = RTE_BPF_ARG_RAW;
+	rv->mask = mask;
+}
+
+static void
+eval_fill_imm64(struct bpf_reg_val *rv, uint64_t mask, uint64_t val)
+{
+	rv->mask = mask;
+	rv->s.min = val;
+	rv->s.max = val;
+	rv->u.min = val;
+	rv->u.max = val;
+}
+
+static void
+eval_fill_imm(struct bpf_reg_val *rv, uint64_t mask, int32_t imm)
+{
+	uint64_t v;
+
+	v = (uint64_t)imm & mask;
+
+	rv->v.type = RTE_BPF_ARG_RAW;
+	eval_fill_imm64(rv, mask, v);
+}
+
+static const char *
+eval_ld_imm64(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
 {
-	int32_t ofs;
+	uint32_t i;
+	uint64_t val;
+	struct bpf_reg_val *rd;
+
+	val = (uint32_t)ins[0].imm | (uint64_t)(uint32_t)ins[1].imm << 32;
 
-	ofs = ins->off;
+	rd = bvf->evst->rv + ins->dst_reg;
+	rd->v.type = RTE_BPF_ARG_RAW;
+	eval_fill_imm64(rd, UINT64_MAX, val);
 
-	if (ofs >= 0 || ofs < -MAX_BPF_STACK_SIZE)
-		return "stack boundary violation";
+	for (i = 0; i != bvf->prm->nb_xsym; i++) {
+
+		/* load of external variable */
+		if (bvf->prm->xsym[i].type == RTE_BPF_XTYPE_VAR &&
+				(uintptr_t)bvf->prm->xsym[i].var.val == val) {
+			rd->v = bvf->prm->xsym[i].var.desc;
+			eval_fill_imm64(rd, UINT64_MAX, 0);
+			break;
+		}
+	}
 
-	ofs = -ofs;
-	bvf->stack_sz = RTE_MAX(bvf->stack_sz, ofs);
 	return NULL;
 }
 
+static void
+eval_apply_mask(struct bpf_reg_val *rv, uint64_t mask)
+{
+	struct bpf_reg_val rt;
+
+	rt.u.min = rv->u.min & mask;
+	rt.u.max = rv->u.max & mask;
+	if (rt.u.min != rv->u.min || rt.u.max != rv->u.max) {
+		rv->u.max = RTE_MAX(rt.u.max, mask);
+		rv->u.min = 0;
+	}
+
+	eval_smax_bound(&rt, mask);
+	rv->s.max = RTE_MIN(rt.s.max, rv->s.max);
+	rv->s.min = RTE_MAX(rt.s.min, rv->s.min);
+
+	rv->mask = mask;
+}
+
+static void
+eval_add(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
+{
+	struct bpf_reg_val rv;
+
+	rv.u.min = (rd->u.min + rs->u.min) & msk;
+	rv.u.max = (rd->u.max + rs->u.max) & msk;
+	rv.s.min = (rd->s.min + rs->s.min) & msk;
+	rv.s.max = (rd->s.max + rs->s.max) & msk;
+
+	/*
+	 * if at least one of the operands is not constant,
+	 * then check for overflow
+	 */
+	if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
+			(rv.u.min < rd->u.min || rv.u.max < rd->u.max))
+		eval_umax_bound(&rv, msk);
+
+	if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
+			(((rs->s.min < 0 && rv.s.min > rd->s.min) ||
+			rv.s.min < rd->s.min) ||
+			((rs->s.max < 0 && rv.s.max > rd->s.max) ||
+				rv.s.max < rd->s.max)))
+		eval_smax_bound(&rv, msk);
+
+	rd->s = rv.s;
+	rd->u = rv.u;
+}
+
+static void
+eval_sub(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
+{
+	struct bpf_reg_val rv;
+
+	rv.u.min = (rd->u.min - rs->u.max) & msk;
+	rv.u.max = (rd->u.max - rs->u.min) & msk;
+	rv.s.min = (rd->s.min - rs->s.max) & msk;
+	rv.s.max = (rd->s.max - rs->s.min) & msk;
+
+	/*
+	 * if at least one of the operands is not constant,
+	 * then check for overflow
+	 */
+	if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
+			(rv.u.min > rd->u.min || rv.u.max > rd->u.max))
+		eval_umax_bound(&rv, msk);
+
+	if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
+			(((rs->s.min < 0 && rv.s.min < rd->s.min) ||
+			rv.s.min > rd->s.min) ||
+			((rs->s.max < 0 && rv.s.max < rd->s.max) ||
+			rv.s.max > rd->s.max)))
+		eval_smax_bound(&rv, msk);
+
+	rd->s = rv.s;
+	rd->u = rv.u;
+}
+
+static void
+eval_lsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
+	uint64_t msk)
+{
+	/* check if the shift value is less than the max result bits */
+	if (rs->u.max >= opsz) {
+		eval_max_bound(rd, msk);
+		return;
+	}
+
+	/* check for overflow */
+	if (rd->u.max > RTE_LEN2MASK(opsz - rs->u.max, uint64_t))
+		eval_umax_bound(rd, msk);
+	else {
+		rd->u.max <<= rs->u.max;
+		rd->u.min <<= rs->u.min;
+	}
+
+	/* check that dreg values are positive and will remain so */
+	if ((uint64_t)rd->s.min >> (opsz - 1) != 0 || rd->s.max >=
+			RTE_LEN2MASK(opsz - rs->u.max - 1, int64_t))
+		eval_smax_bound(rd, msk);
+	else {
+		rd->s.max <<= rs->u.max;
+		rd->s.min <<= rs->u.min;
+	}
+}
+
+static void
+eval_rsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
+	uint64_t msk)
+{
+	/* check if the shift value is less than the max result bits */
+	if (rs->u.max >= opsz) {
+		eval_max_bound(rd, msk);
+		return;
+	}
+
+	rd->u.max >>= rs->u.min;
+	rd->u.min >>= rs->u.max;
+
+	/* check that dreg values are always positive */
+	if ((uint64_t)rd->s.min >> (opsz - 1) != 0)
+		eval_smax_bound(rd, msk);
+	else {
+		rd->s.max >>= rs->u.min;
+		rd->s.min >>= rs->u.max;
+	}
+}
+
+static void
+eval_arsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
+	uint64_t msk)
+{
+	uint32_t shv;
+
+	/* check if the shift value is less than the max result bits */
+	if (rs->u.max >= opsz) {
+		eval_max_bound(rd, msk);
+		return;
+	}
+
+	rd->u.max = (int64_t)rd->u.max >> rs->u.min;
+	rd->u.min = (int64_t)rd->u.min >> rs->u.max;
+
+	/* if we have 32-bit values - extend them to 64-bit */
+	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
+		rd->s.min <<= opsz;
+		rd->s.max <<= opsz;
+		shv = opsz;
+	} else
+		shv = 0;
+
+	if (rd->s.min < 0)
+		rd->s.min = (rd->s.min >> (rs->u.min + shv)) & msk;
+	else
+		rd->s.min = (rd->s.min >> (rs->u.max + shv)) & msk;
+
+	if (rd->s.max < 0)
+		rd->s.max = (rd->s.max >> (rs->u.max + shv)) & msk;
+	else
+		rd->s.max = (rd->s.max >> (rs->u.min + shv)) & msk;
+}
+
+static uint64_t
+eval_umax_bits(uint64_t v, size_t opsz)
+{
+	if (v == 0)
+		return 0;
+
+	v = __builtin_clzll(v);
+	return RTE_LEN2MASK(opsz - v, uint64_t);
+}
+
+/* estimate max possible value for (v1 & v2) */
+static uint64_t
+eval_uand_max(uint64_t v1, uint64_t v2, size_t opsz)
+{
+	v1 = eval_umax_bits(v1, opsz);
+	v2 = eval_umax_bits(v2, opsz);
+	return (v1 & v2);
+}
+
+/* estimate max possible value for (v1 | v2) */
+static uint64_t
+eval_uor_max(uint64_t v1, uint64_t v2, size_t opsz)
+{
+	v1 = eval_umax_bits(v1, opsz);
+	v2 = eval_umax_bits(v2, opsz);
+	return (v1 | v2);
+}
+
+static void
+eval_and(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
+	uint64_t msk)
+{
+	/* both operands are constants */
+	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
+		rd->u.min &= rs->u.min;
+		rd->u.max &= rs->u.max;
+	} else {
+		rd->u.max = eval_uand_max(rd->u.max, rs->u.max, opsz);
+		rd->u.min &= rs->u.min;
+	}
+
+	/* both operands are constants */
+	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
+		rd->s.min &= rs->s.min;
+		rd->s.max &= rs->s.max;
+	/* at least one of operand is non-negative */
+	} else if (rd->s.min >= 0 || rs->s.min >= 0) {
+		rd->s.max = eval_uand_max(rd->s.max & (msk >> 1),
+			rs->s.max & (msk >> 1), opsz);
+		rd->s.min &= rs->s.min;
+	} else
+		eval_smax_bound(rd, msk);
+}
+
+static void
+eval_or(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
+	uint64_t msk)
+{
+	/* both operands are constants */
+	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
+		rd->u.min |= rs->u.min;
+		rd->u.max |= rs->u.max;
+	} else {
+		rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
+		rd->u.min |= rs->u.min;
+	}
+
+	/* both operands are constants */
+	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
+		rd->s.min |= rs->s.min;
+		rd->s.max |= rs->s.max;
+
+	/* both operands are non-negative */
+	} else if (rd->s.min >= 0 || rs->s.min >= 0) {
+		rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
+		rd->s.min |= rs->s.min;
+	} else
+		eval_smax_bound(rd, msk);
+}
+
+static void
+eval_xor(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
+	uint64_t msk)
+{
+	/* both operands are constants */
+	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
+		rd->u.min ^= rs->u.min;
+		rd->u.max ^= rs->u.max;
+	} else {
+		rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
+		rd->u.min = 0;
+	}
+
+	/* both operands are constants */
+	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
+		rd->s.min ^= rs->s.min;
+		rd->s.max ^= rs->s.max;
+
+	/* both operands are non-negative */
+	} else if (rd->s.min >= 0 || rs->s.min >= 0) {
+		rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
+		rd->s.min = 0;
+	} else
+		eval_smax_bound(rd, msk);
+}
+
+static void
+eval_mul(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
+	uint64_t msk)
+{
+	/* both operands are constants */
+	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
+		rd->u.min = (rd->u.min * rs->u.min) & msk;
+		rd->u.max = (rd->u.max * rs->u.max) & msk;
+	/* check for overflow */
+	} else if (rd->u.max <= msk >> opsz / 2 &&
+			rs->u.max <= msk >> opsz / 2) {
+		rd->u.max *= rs->u.max;
+		rd->u.min *= rs->u.min;
+	} else
+		eval_umax_bound(rd, msk);
+
+	/* both operands are constants */
+	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
+		rd->s.min = (rd->s.min * rs->s.min) & msk;
+		rd->s.max = (rd->s.max * rs->s.max) & msk;
+	/* check that both operands are positive and no overflow */
+	} else if (rd->s.min >= 0 && rs->s.min >= 0) {
+		rd->s.max *= rs->s.max;
+		rd->s.min *= rs->s.min;
+	} else
+		eval_smax_bound(rd, msk);
+}
+
 static const char *
-eval_store(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+eval_divmod(uint32_t op, struct bpf_reg_val *rd, struct bpf_reg_val *rs,
+	size_t opsz, uint64_t msk)
 {
-	if (ins->dst_reg == EBPF_REG_10)
-		return eval_stack(bvf, ins);
+	/* both operands are constants */
+	if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
+		if (rs->u.max == 0)
+			return "division by 0";
+		if (op == BPF_DIV) {
+			rd->u.min /= rs->u.min;
+			rd->u.max /= rs->u.max;
+		} else {
+			rd->u.min %= rs->u.min;
+			rd->u.max %= rs->u.max;
+		}
+	} else {
+		if (op == BPF_MOD)
+			rd->u.max = RTE_MIN(rd->u.max, rs->u.max - 1);
+		rd->u.min = 0;
+	}
+
+	/* if we have 32-bit values - extend them to 64-bit */
+	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
+		rd->s.min = (int32_t)rd->s.min;
+		rd->s.max = (int32_t)rd->s.max;
+		rs->s.min = (int32_t)rs->s.min;
+		rs->s.max = (int32_t)rs->s.max;
+	}
+
+	/* both operands are constants */
+	if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
+		if (rs->s.max == 0)
+			return "division by 0";
+		if (op == BPF_DIV) {
+			rd->s.min /= rs->s.min;
+			rd->s.max /= rs->s.max;
+		} else {
+			rd->s.min %= rs->s.min;
+			rd->s.max %= rs->s.max;
+		}
+	} else if (op == BPF_MOD) {
+		rd->s.max = RTE_MAX(rd->s.max, 0);
+		rd->s.min = RTE_MIN(rd->s.min, 0);
+	} else
+		eval_smax_bound(rd, msk);
+
+	rd->s.max &= msk;
+	rd->s.min &= msk;
+
 	return NULL;
 }
 
+static void
+eval_neg(struct bpf_reg_val *rd, size_t opsz, uint64_t msk)
+{
+	uint64_t ux, uy;
+	int64_t sx, sy;
+
+	/* if we have 32-bit values - extend them to 64-bit */
+	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
+		rd->u.min = (int32_t)rd->u.min;
+		rd->u.max = (int32_t)rd->u.max;
+	}
+
+	ux = -(int64_t)rd->u.min & msk;
+	uy = -(int64_t)rd->u.max & msk;
+
+	rd->u.max = RTE_MAX(ux, uy);
+	rd->u.min = RTE_MIN(ux, uy);
+
+	/* if we have 32-bit values - extend them to 64-bit */
+	if (opsz == sizeof(uint32_t) * CHAR_BIT) {
+		rd->s.min = (int32_t)rd->s.min;
+		rd->s.max = (int32_t)rd->s.max;
+	}
+
+	sx = -rd->s.min & msk;
+	sy = -rd->s.max & msk;
+
+	rd->s.max = RTE_MAX(sx, sy);
+	rd->s.min = RTE_MIN(sx, sy);
+}
+
+/*
+ * Check that the destination and source operands are in a defined state.
+ */
+static const char *
+eval_defined(const struct bpf_reg_val *dst, const struct bpf_reg_val *src)
+{
+	if (dst != NULL && dst->v.type == RTE_BPF_ARG_UNDEF)
+		return "dest reg value is undefined";
+	if (src != NULL && src->v.type == RTE_BPF_ARG_UNDEF)
+		return "src reg value is undefined";
+	return NULL;
+}
+
+static const char *
+eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+{
+	uint64_t msk;
+	uint32_t op;
+	size_t opsz;
+	const char *err;
+	struct bpf_eval_state *st;
+	struct bpf_reg_val *rd, rs;
+
+	opsz = (BPF_CLASS(ins->code) == BPF_ALU) ?
+		sizeof(uint32_t) : sizeof(uint64_t);
+	opsz = opsz * CHAR_BIT;
+	msk = RTE_LEN2MASK(opsz, uint64_t);
+
+	st = bvf->evst;
+	rd = st->rv + ins->dst_reg;
+
+	if (BPF_SRC(ins->code) == BPF_X) {
+		rs = st->rv[ins->src_reg];
+		eval_apply_mask(&rs, msk);
+	} else
+		eval_fill_imm(&rs, msk, ins->imm);
+
+	eval_apply_mask(rd, msk);
+
+	op = BPF_OP(ins->code);
+
+	err = eval_defined((op != EBPF_MOV) ? rd : NULL,
+			(op != BPF_NEG) ? &rs : NULL);
+	if (err != NULL)
+		return err;
+
+	if (op == BPF_ADD)
+		eval_add(rd, &rs, msk);
+	else if (op == BPF_SUB)
+		eval_sub(rd, &rs, msk);
+	else if (op == BPF_LSH)
+		eval_lsh(rd, &rs, opsz, msk);
+	else if (op == BPF_RSH)
+		eval_rsh(rd, &rs, opsz, msk);
+	else if (op == EBPF_ARSH)
+		eval_arsh(rd, &rs, opsz, msk);
+	else if (op == BPF_AND)
+		eval_and(rd, &rs, opsz, msk);
+	else if (op == BPF_OR)
+		eval_or(rd, &rs, opsz, msk);
+	else if (op == BPF_XOR)
+		eval_xor(rd, &rs, opsz, msk);
+	else if (op == BPF_MUL)
+		eval_mul(rd, &rs, opsz, msk);
+	else if (op == BPF_DIV || op == BPF_MOD)
+		err = eval_divmod(op, rd, &rs, opsz, msk);
+	else if (op == BPF_NEG)
+		eval_neg(rd, opsz, msk);
+	else if (op == EBPF_MOV)
+		*rd = rs;
+	else
+		eval_max_bound(rd, msk);
+
+	return err;
+}
+
+static const char *
+eval_bele(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+{
+	uint64_t msk;
+	struct bpf_eval_state *st;
+	struct bpf_reg_val *rd;
+	const char *err;
+
+	msk = RTE_LEN2MASK(ins->imm, uint64_t);
+
+	st = bvf->evst;
+	rd = st->rv + ins->dst_reg;
+
+	err = eval_defined(rd, NULL);
+	if (err != NULL)
+		return err;
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+	if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_BE))
+		eval_max_bound(rd, msk);
+	else
+		eval_apply_mask(rd, msk);
+#else
+	if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_LE))
+		eval_max_bound(rd, msk);
+	else
+		eval_apply_mask(rd, msk);
+#endif
+
+	return NULL;
+}
+
+static const char *
+eval_ptr(struct bpf_verifier *bvf, struct bpf_reg_val *rm, uint32_t opsz,
+	uint32_t align, int16_t off)
+{
+	struct bpf_reg_val rv;
+
+	/* calculate reg + offset */
+	eval_fill_imm(&rv, rm->mask, off);
+	eval_add(rm, &rv, rm->mask);
+
+	if (RTE_BPF_ARG_PTR_TYPE(rm->v.type) == 0)
+		return "destination is not a pointer";
+
+	if (rm->mask != UINT64_MAX)
+		return "pointer truncation";
+
+	if (rm->u.max + opsz > rm->v.size ||
+			(uint64_t)rm->s.max + opsz > rm->v.size ||
+			rm->s.min < 0)
+		return "memory boundary violation";
+
+	if (rm->u.max % align != 0)
+		return "unaligned memory access";
+
+	if (rm->v.type == RTE_BPF_ARG_PTR_STACK) {
+
+		if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
+				rm->u.max != (uint64_t)rm->s.max)
+			return "stack access with variable offset";
+
+		bvf->stack_sz = RTE_MAX(bvf->stack_sz, rm->v.size - rm->u.max);
+
+	/* pointer to mbuf */
+	} else if (rm->v.type == RTE_BPF_ARG_PTR_MBUF) {
+
+		if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
+				rm->u.max != (uint64_t)rm->s.max)
+			return "mbuf access with variable offset";
+	}
+
+	return NULL;
+}
+
+static void
+eval_max_load(struct bpf_reg_val *rv, uint64_t mask)
+{
+	eval_umax_bound(rv, mask);
+
+	/* full 64-bit load */
+	if (mask == UINT64_MAX)
+		eval_smax_bound(rv, mask);
+
+	/* zero-extend load */
+	rv->s.min = rv->u.min;
+	rv->s.max = rv->u.max;
+}
+
 static const char *
 eval_load(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
 {
-	if (ins->src_reg == EBPF_REG_10)
-		return eval_stack(bvf, ins);
+	uint32_t opsz;
+	uint64_t msk;
+	const char *err;
+	struct bpf_eval_state *st;
+	struct bpf_reg_val *rd, rs;
+	const struct bpf_reg_val *sv;
+
+	st = bvf->evst;
+	rd = st->rv + ins->dst_reg;
+	rs = st->rv[ins->src_reg];
+	opsz = bpf_size(BPF_SIZE(ins->code));
+	msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
+
+	err = eval_ptr(bvf, &rs, opsz, 1, ins->off);
+	if (err != NULL)
+		return err;
+
+	if (rs.v.type == RTE_BPF_ARG_PTR_STACK) {
+
+		sv = st->sv + rs.u.max / sizeof(uint64_t);
+		if (sv->v.type == RTE_BPF_ARG_UNDEF || sv->mask < msk)
+			return "undefined value on the stack";
+
+		*rd = *sv;
+
+	/* pointer to mbuf */
+	} else if (rs.v.type == RTE_BPF_ARG_PTR_MBUF) {
+
+		if (rs.u.max == offsetof(struct rte_mbuf, next)) {
+			eval_fill_imm(rd, msk, 0);
+			rd->v = rs.v;
+		} else if (rs.u.max == offsetof(struct rte_mbuf, buf_addr)) {
+			eval_fill_imm(rd, msk, 0);
+			rd->v.type = RTE_BPF_ARG_PTR;
+			rd->v.size = rs.v.buf_size;
+		} else if (rs.u.max == offsetof(struct rte_mbuf, data_off)) {
+			eval_fill_imm(rd, msk, RTE_PKTMBUF_HEADROOM);
+			rd->v.type = RTE_BPF_ARG_RAW;
+		} else {
+			eval_max_load(rd, msk);
+			rd->v.type = RTE_BPF_ARG_RAW;
+		}
+
+	/* pointer to raw data */
+	} else {
+		eval_max_load(rd, msk);
+		rd->v.type = RTE_BPF_ARG_RAW;
+	}
+
 	return NULL;
 }
 
 static const char *
+eval_mbuf_store(const struct bpf_reg_val *rv, uint32_t opsz)
+{
+	uint32_t i;
+
+	static const struct {
+		size_t off;
+		size_t sz;
+	} mbuf_ro_fields[] = {
+		{ .off = offsetof(struct rte_mbuf, buf_addr), },
+		{ .off = offsetof(struct rte_mbuf, refcnt), },
+		{ .off = offsetof(struct rte_mbuf, nb_segs), },
+		{ .off = offsetof(struct rte_mbuf, buf_len), },
+		{ .off = offsetof(struct rte_mbuf, pool), },
+		{ .off = offsetof(struct rte_mbuf, next), },
+		{ .off = offsetof(struct rte_mbuf, priv_size), },
+	};
+
+	for (i = 0; i != RTE_DIM(mbuf_ro_fields) &&
+			(mbuf_ro_fields[i].off + mbuf_ro_fields[i].sz <=
+			rv->u.max || rv->u.max + opsz <= mbuf_ro_fields[i].off);
+			i++)
+		;
+
+	if (i != RTE_DIM(mbuf_ro_fields))
+		return "store to the read-only mbuf field";
+
+	return NULL;
+}
+
+static const char *
+eval_store(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+{
+	uint32_t opsz;
+	uint64_t msk;
+	const char *err;
+	struct bpf_eval_state *st;
+	struct bpf_reg_val rd, rs, *sv;
+
+	opsz = bpf_size(BPF_SIZE(ins->code));
+	msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
+
+	st = bvf->evst;
+	rd = st->rv[ins->dst_reg];
+
+	if (BPF_CLASS(ins->code) == BPF_STX) {
+		rs = st->rv[ins->src_reg];
+		eval_apply_mask(&rs, msk);
+	} else
+		eval_fill_imm(&rs, msk, ins->imm);
+
+	err = eval_defined(NULL, &rs);
+	if (err != NULL)
+		return err;
+
+	err = eval_ptr(bvf, &rd, opsz, 1, ins->off);
+	if (err != NULL)
+		return err;
+
+	if (rd.v.type == RTE_BPF_ARG_PTR_STACK) {
+
+		sv = st->sv + rd.u.max / sizeof(uint64_t);
+		if (BPF_CLASS(ins->code) == BPF_STX &&
+				BPF_MODE(ins->code) == EBPF_XADD)
+			eval_max_bound(sv, msk);
+		else
+			*sv = rs;
+
+	/* pointer to mbuf */
+	} else if (rd.v.type == RTE_BPF_ARG_PTR_MBUF) {
+		err = eval_mbuf_store(&rd, opsz);
+		if (err != NULL)
+			return err;
+	}
+
+	return NULL;
+}
+
+static const char *
+eval_func_arg(struct bpf_verifier *bvf, const struct rte_bpf_arg *arg,
+	struct bpf_reg_val *rv)
+{
+	uint32_t i, n;
+	struct bpf_eval_state *st;
+	const char *err;
+
+	st = bvf->evst;
+
+	if (rv->v.type == RTE_BPF_ARG_UNDEF)
+		return "Undefined argument type";
+
+	if (arg->type != rv->v.type &&
+			arg->type != RTE_BPF_ARG_RAW &&
+			(arg->type != RTE_BPF_ARG_PTR ||
+			RTE_BPF_ARG_PTR_TYPE(rv->v.type) == 0))
+		return "Invalid argument type";
+
+	err = NULL;
+
+	/* argument is a pointer */
+	if (RTE_BPF_ARG_PTR_TYPE(arg->type) != 0) {
+
+		err = eval_ptr(bvf, rv, arg->size, 1, 0);
+
+		/*
+		 * A pointer to a variable on the stack is passed as an
+		 * argument; mark the stack space it occupies as initialized.
+		 */
+		if (err == NULL && rv->v.type == RTE_BPF_ARG_PTR_STACK) {
+
+			i = rv->u.max / sizeof(uint64_t);
+			n = i + arg->size / sizeof(uint64_t);
+			while (i != n) {
+				eval_fill_max_bound(st->sv + i, UINT64_MAX);
+				i++;
+			}
+		}
+	}
+
+	return err;
+}
+
+static const char *
 eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
 {
-	uint32_t idx;
+	uint64_t msk;
+	uint32_t i, idx;
+	struct bpf_reg_val *rv;
+	const struct rte_bpf_xsym *xsym;
+	const char *err;
 
 	idx = ins->imm;
 
@@ -145,6 +940,144 @@ eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
 	/* for now don't support function calls on 32 bit platform */
 	if (sizeof(uint64_t) != sizeof(uintptr_t))
 		return "function calls are supported only for 64 bit apps";
+
+	xsym = bvf->prm->xsym + idx;
+
+	/* evaluate function arguments */
+	err = NULL;
+	for (i = 0; i != xsym->func.nb_args && err == NULL; i++) {
+		err = eval_func_arg(bvf, xsym->func.args + i,
+			bvf->evst->rv + EBPF_REG_1 + i);
+	}
+
+	/* R1-R5 argument/scratch registers */
+	for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
+		bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;
+
+	/* update return value */
+
+	rv = bvf->evst->rv + EBPF_REG_0;
+	rv->v = xsym->func.ret;
+	msk = (rv->v.type == RTE_BPF_ARG_RAW) ?
+		RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t) : UINTPTR_MAX;
+	eval_max_bound(rv, msk);
+	rv->mask = msk;
+
+	return err;
+}
+
+static void
+eval_jeq_jne(struct bpf_reg_val *trd, struct bpf_reg_val *trs)
+{
+	/* sreg is constant */
+	if (trs->u.min == trs->u.max) {
+		trd->u = trs->u;
+	/* dreg is constant */
+	} else if (trd->u.min == trd->u.max) {
+		trs->u = trd->u;
+	} else {
+		trd->u.max = RTE_MIN(trd->u.max, trs->u.max);
+		trd->u.min = RTE_MAX(trd->u.min, trs->u.min);
+		trs->u = trd->u;
+	}
+
+	/* sreg is constant */
+	if (trs->s.min == trs->s.max) {
+		trd->s = trs->s;
+	/* dreg is constant */
+	} else if (trd->s.min == trd->s.max) {
+		trs->s = trd->s;
+	} else {
+		trd->s.max = RTE_MIN(trd->s.max, trs->s.max);
+		trd->s.min = RTE_MAX(trd->s.min, trs->s.min);
+		trs->s = trd->s;
+	}
+}
+
+static void
+eval_jgt_jle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
+	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
+{
+	frd->u.max = RTE_MIN(frd->u.max, frs->u.min);
+	trd->u.min = RTE_MAX(trd->u.min, trs->u.min + 1);
+}
+
+static void
+eval_jlt_jge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
+	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
+{
+	frd->u.min = RTE_MAX(frd->u.min, frs->u.min);
+	trd->u.max = RTE_MIN(trd->u.max, trs->u.max - 1);
+}
+
+static void
+eval_jsgt_jsle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
+	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
+{
+	frd->s.max = RTE_MIN(frd->s.max, frs->s.min);
+	trd->s.min = RTE_MAX(trd->s.min, trs->s.min + 1);
+}
+
+static void
+eval_jslt_jsge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
+	struct bpf_reg_val *frd, struct bpf_reg_val *frs)
+{
+	frd->s.min = RTE_MAX(frd->s.min, frs->s.min);
+	trd->s.max = RTE_MIN(trd->s.max, trs->s.max - 1);
+}
+
+static const char *
+eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+{
+	uint32_t op;
+	const char *err;
+	struct bpf_eval_state *fst, *tst;
+	struct bpf_reg_val *frd, *frs, *trd, *trs;
+	struct bpf_reg_val rvf, rvt;
+
+	tst = bvf->evst;
+	fst = bvf->evin->evst;
+
+	frd = fst->rv + ins->dst_reg;
+	trd = tst->rv + ins->dst_reg;
+
+	if (BPF_SRC(ins->code) == BPF_X) {
+		frs = fst->rv + ins->src_reg;
+		trs = tst->rv + ins->src_reg;
+	} else {
+		frs = &rvf;
+		trs = &rvt;
+		eval_fill_imm(frs, UINT64_MAX, ins->imm);
+		eval_fill_imm(trs, UINT64_MAX, ins->imm);
+	}
+
+	err = eval_defined(trd, trs);
+	if (err != NULL)
+		return err;
+
+	op = BPF_OP(ins->code);
+
+	if (op == BPF_JEQ)
+		eval_jeq_jne(trd, trs);
+	else if (op == EBPF_JNE)
+		eval_jeq_jne(frd, frs);
+	else if (op == BPF_JGT)
+		eval_jgt_jle(trd, trs, frd, frs);
+	else if (op == EBPF_JLE)
+		eval_jgt_jle(frd, frs, trd, trs);
+	else if (op == EBPF_JLT)
+		eval_jlt_jge(trd, trs, frd, frs);
+	else if (op == BPF_JGE)
+		eval_jlt_jge(frd, frs, trd, trs);
+	else if (op == EBPF_JSGT)
+		eval_jsgt_jsle(trd, trs, frd, frs);
+	else if (op == EBPF_JSLE)
+		eval_jsgt_jsle(frd, frs, trd, trs);
+	else if (op == EBPF_JSLT)
+		eval_jslt_jsge(trd, trs, frd, frs);
+	else if (op == EBPF_JSGE)
+		eval_jslt_jsge(frd, frs, trd, trs);
+
 	return NULL;
 }
 
@@ -157,256 +1090,306 @@ static const struct bpf_ins_check ins_chk[UINT8_MAX] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_SUB | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_AND | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_OR | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_LSH | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_RSH | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_XOR | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_MUL | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | EBPF_MOV | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_DIV | BPF_K)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 1, .max = UINT32_MAX},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_MOD | BPF_K)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 1, .max = UINT32_MAX},
+		.eval = eval_alu,
 	},
 	/* ALU IMM 64-bit instructions */
 	[(EBPF_ALU64 | BPF_ADD | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_SUB | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_AND | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_OR | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_LSH | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_RSH | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | EBPF_ARSH | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_XOR | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_MUL | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | EBPF_MOV | BPF_K)] = {
 		.mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX,},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_DIV | BPF_K)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 1, .max = UINT32_MAX},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_MOD | BPF_K)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 1, .max = UINT32_MAX},
+		.eval = eval_alu,
 	},
 	/* ALU REG 32-bit instructions */
 	[(BPF_ALU | BPF_ADD | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_SUB | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_AND | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_OR | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_LSH | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_RSH | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_XOR | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_MUL | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_DIV | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_MOD | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | EBPF_MOV | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | BPF_NEG)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(BPF_ALU | EBPF_END | EBPF_TO_BE)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 16, .max = 64},
 		.check = check_alu_bele,
+		.eval = eval_bele,
 	},
 	[(BPF_ALU | EBPF_END | EBPF_TO_LE)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 16, .max = 64},
 		.check = check_alu_bele,
+		.eval = eval_bele,
 	},
 	/* ALU REG 64-bit instructions */
 	[(EBPF_ALU64 | BPF_ADD | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_SUB | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_AND | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_OR | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_LSH | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_RSH | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | EBPF_ARSH | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_XOR | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_MUL | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_DIV | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_MOD | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | EBPF_MOV | BPF_X)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	[(EBPF_ALU64 | BPF_NEG)] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_alu,
 	},
 	/* load instructions */
 	[(BPF_LDX | BPF_MEM | BPF_B)] = {
@@ -438,6 +1421,7 @@ static const struct bpf_ins_check ins_chk[UINT8_MAX] = {
 		.mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = UINT32_MAX},
+		.eval = eval_ld_imm64,
 	},
 	/* store REG instructions */
 	[(BPF_STX | BPF_MEM | BPF_B)] = {
@@ -513,92 +1497,110 @@ static const struct bpf_ins_check ins_chk[UINT8_MAX] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = UINT32_MAX},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | EBPF_JNE | BPF_K)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = UINT32_MAX},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | BPF_JGT | BPF_K)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = UINT32_MAX},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | EBPF_JLT | BPF_K)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = UINT32_MAX},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | BPF_JGE | BPF_K)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = UINT32_MAX},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | EBPF_JLE | BPF_K)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = UINT32_MAX},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | EBPF_JSGT | BPF_K)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = UINT32_MAX},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | EBPF_JSLT | BPF_K)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = UINT32_MAX},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | EBPF_JSGE | BPF_K)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = UINT32_MAX},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | EBPF_JSLE | BPF_K)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = UINT32_MAX},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | BPF_JSET | BPF_K)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = UINT32_MAX},
+		.eval = eval_jcc,
 	},
 	/* jcc REG instructions */
 	[(BPF_JMP | BPF_JEQ | BPF_X)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | EBPF_JNE | BPF_X)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | BPF_JGT | BPF_X)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | EBPF_JLT | BPF_X)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | BPF_JGE | BPF_X)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | EBPF_JLE | BPF_X)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | EBPF_JSGT | BPF_X)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | EBPF_JSLT | BPF_X)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
@@ -609,16 +1611,19 @@ static const struct bpf_ins_check ins_chk[UINT8_MAX] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | EBPF_JSLE | BPF_X)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_jcc,
 	},
 	[(BPF_JMP | BPF_JSET | BPF_X)] = {
 		.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
 		.off = { .min = 0, .max = UINT16_MAX},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_jcc,
 	},
 	/* call instruction */
 	[(BPF_JMP | EBPF_CALL)] = {
@@ -632,6 +1637,7 @@ static const struct bpf_ins_check ins_chk[UINT8_MAX] = {
 		.mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
 		.off = { .min = 0, .max = 0},
 		.imm = { .min = 0, .max = 0},
+		.eval = eval_exit,
 	},
 };
 
@@ -1046,7 +2052,7 @@ save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
 	st = pull_eval_state(bvf);
 	if (st == NULL) {
 		RTE_BPF_LOG(ERR,
-			"%s: internal error (out of space) at pc: %u",
+			"%s: internal error (out of space) at pc: %u\n",
 			__func__, get_node_idx(bvf, node));
 		return -ENOMEM;
 	}
@@ -1078,6 +2084,32 @@ restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
 	push_eval_state(bvf);
 }
 
+static void
+log_eval_state(const struct bpf_verifier *bvf, const struct ebpf_insn *ins,
+	uint32_t pc, int32_t loglvl)
+{
+	const struct bpf_eval_state *st;
+	const struct bpf_reg_val *rv;
+
+	rte_log(loglvl, rte_bpf_logtype, "%s(pc=%u):\n", __func__, pc);
+
+	st = bvf->evst;
+	rv = st->rv + ins->dst_reg;
+
+	rte_log(loglvl, rte_bpf_logtype,
+		"r%u={\n"
+		"\tv={type=%u, size=%zu},\n"
+		"\tmask=0x%" PRIx64 ",\n"
+		"\tu={min=0x%" PRIx64 ", max=0x%" PRIx64 "},\n"
+		"\ts={min=%" PRId64 ", max=%" PRId64 "},\n"
+		"};\n",
+		ins->dst_reg,
+		rv->v.type, rv->v.size,
+		rv->mask,
+		rv->u.min, rv->u.max,
+		rv->s.min, rv->s.max);
+}
+
 /*
  * Do second pass through CFG and try to evaluate instructions
  * via each possible path.
@@ -1096,23 +2128,56 @@ evaluate(struct bpf_verifier *bvf)
 	const struct ebpf_insn *ins;
 	struct inst_node *next, *node;
 
-	node = bvf->in;
+	/* initial state of frame pointer */
+	static const struct bpf_reg_val rvfp = {
+		.v = {
+			.type = RTE_BPF_ARG_PTR_STACK,
+			.size = MAX_BPF_STACK_SIZE,
+		},
+		.mask = UINT64_MAX,
+		.u = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
+		.s = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
+	};
+
+	bvf->evst->rv[EBPF_REG_1].v = bvf->prm->prog_arg;
+	bvf->evst->rv[EBPF_REG_1].mask = UINT64_MAX;
+	if (bvf->prm->prog_arg.type == RTE_BPF_ARG_RAW)
+		eval_max_bound(bvf->evst->rv + EBPF_REG_1, UINT64_MAX);
+
+	bvf->evst->rv[EBPF_REG_10] = rvfp;
+
 	ins = bvf->prm->ins;
+	node = bvf->in;
+	next = node;
 	rc = 0;
 
 	while (node != NULL && rc == 0) {
 
-		/* current node evaluation */
-		idx = get_node_idx(bvf, node);
-		op = ins[idx].code;
+		/*
+		 * current node evaluation, make sure we evaluate
+		 * each node only once.
+		 */
+		if (next != NULL) {
+
+			bvf->evin = node;
+			idx = get_node_idx(bvf, node);
+			op = ins[idx].code;
 
-		if (ins_chk[op].eval != NULL) {
-			err = ins_chk[op].eval(bvf, ins + idx);
-			if (err != NULL) {
-				RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
-					__func__, err, idx);
-				rc = -EINVAL;
+			/* for a jcc node, make a copy of the evaluation state */
+			if (node->nb_edge > 1)
+				rc |= save_eval_state(bvf, node);
+
+			if (ins_chk[op].eval != NULL && rc == 0) {
+				err = ins_chk[op].eval(bvf, ins + idx);
+				if (err != NULL) {
+					RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
+						__func__, err, idx);
+					rc = -EINVAL;
+				}
 			}
+
+			log_eval_state(bvf, ins + idx, idx, RTE_LOG_DEBUG);
+			bvf->evin = NULL;
 		}
 
 		/* proceed through CFG */
@@ -1120,9 +2185,8 @@ evaluate(struct bpf_verifier *bvf)
 		if (next != NULL) {
 
 			/* proceed with next child */
-			if (node->cur_edge != node->nb_edge)
-				rc |= save_eval_state(bvf, node);
-			else if (node->evst != NULL)
+			if (node->cur_edge == node->nb_edge &&
+					node->evst != NULL)
 				restore_eval_state(bvf, node);
 
 			next->prev_node = get_node_idx(bvf, node);
-- 
2.13.6

* [dpdk-dev] [PATCH 3/3] test/bpf: add new test-case for external function call
  2018-06-08  8:42 [dpdk-dev] [PATCH 0/3] bpf: extend validation of input BPF programs Konstantin Ananyev
  2018-06-08  8:42 ` [dpdk-dev] [PATCH 1/3] bpf: add extra information for external symbol definitions Konstantin Ananyev
  2018-06-08  8:42 ` [dpdk-dev] [PATCH 2/3] bpf: add extra validation for input BPF program Konstantin Ananyev
@ 2018-06-08  8:42 ` Konstantin Ananyev
  2018-07-12  7:46 ` [dpdk-dev] [PATCH 0/3] bpf: extend validation of input BPF programs Thomas Monjalon
  3 siblings, 0 replies; 5+ messages in thread
From: Konstantin Ananyev @ 2018-06-08  8:42 UTC (permalink / raw)
  To: dev, dev; +Cc: Konstantin Ananyev
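
The new test-case passes pointers to two structures on the BPF program's
stack into an external function, exercising the new validation of
external function call arguments.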

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 test/test/test_bpf.c | 150 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 150 insertions(+)

diff --git a/test/test/test_bpf.c b/test/test/test_bpf.c
index 1e9caef95..fa17c4f74 100644
--- a/test/test/test_bpf.c
+++ b/test/test/test_bpf.c
@@ -1551,6 +1551,138 @@ static const struct rte_bpf_xsym test_call1_xsym[] = {
 	},
 };
 
+static const struct ebpf_insn test_call2_prog[] = {
+
+	{
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_10,
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
+		.dst_reg = EBPF_REG_1,
+		.imm = -(int32_t)sizeof(struct dummy_offset),
+	},
+	{
+		.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+		.dst_reg = EBPF_REG_2,
+		.src_reg = EBPF_REG_10,
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_K),
+		.dst_reg = EBPF_REG_2,
+		.imm = -2 * (int32_t)sizeof(struct dummy_offset),
+	},
+	{
+		.code = (BPF_JMP | EBPF_CALL),
+		.imm = 0,
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | EBPF_DW),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_10,
+		.off = -(int32_t)(sizeof(struct dummy_offset) -
+			offsetof(struct dummy_offset, u64)),
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_W),
+		.dst_reg = EBPF_REG_0,
+		.src_reg = EBPF_REG_10,
+		.off = -(int32_t)(sizeof(struct dummy_offset) -
+			offsetof(struct dummy_offset, u32)),
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+		.dst_reg = EBPF_REG_0,
+		.src_reg = EBPF_REG_1,
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_H),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_10,
+		.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
+			offsetof(struct dummy_offset, u16)),
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+		.dst_reg = EBPF_REG_0,
+		.src_reg = EBPF_REG_1,
+	},
+	{
+		.code = (BPF_LDX | BPF_MEM | BPF_B),
+		.dst_reg = EBPF_REG_1,
+		.src_reg = EBPF_REG_10,
+		.off = -(int32_t)(2 * sizeof(struct dummy_offset) -
+			offsetof(struct dummy_offset, u8)),
+	},
+	{
+		.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+		.dst_reg = EBPF_REG_0,
+		.src_reg = EBPF_REG_1,
+	},
+	{
+		.code = (BPF_JMP | EBPF_EXIT),
+	},
+
+};
+
+static void
+dummy_func2(struct dummy_offset *a, struct dummy_offset *b)
+{
+	uint64_t v;
+
+	v = 0;
+	a->u64 = v++;
+	a->u32 = v++;
+	a->u16 = v++;
+	a->u8 = v++;
+	b->u64 = v++;
+	b->u32 = v++;
+	b->u16 = v++;
+	b->u8 = v++;
+}
+
+static int
+test_call2_check(uint64_t rc, const void *arg)
+{
+	uint64_t v;
+	struct dummy_offset a, b;
+
+	RTE_SET_USED(arg);
+
+	dummy_func2(&a, &b);
+	v = a.u64 + a.u32 + b.u16 + b.u8;
+
+	if (v != rc) {
+		printf("%s@%d: invalid return value "
+			"expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
+			__func__, __LINE__, v, rc);
+		return -1;
+	}
+	return 0;
+}
+
+static const struct rte_bpf_xsym test_call2_xsym[] = {
+	{
+		.name = RTE_STR(dummy_func2),
+		.type = RTE_BPF_XTYPE_FUNC,
+		.func = {
+			.val = (void *)dummy_func2,
+			.nb_args = 2,
+			.args = {
+				[0] = {
+					.type = RTE_BPF_ARG_PTR,
+					.size = sizeof(struct dummy_offset),
+				},
+				[1] = {
+					.type = RTE_BPF_ARG_PTR,
+					.size = sizeof(struct dummy_offset),
+				},
+			},
+		},
+	},
+};
+
 static const struct bpf_test tests[] = {
 	{
 		.name = "test_store1",
@@ -1710,6 +1842,24 @@ static const struct bpf_test tests[] = {
 		/* for now don't support function calls on 32 bit platform */
 		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
 	},
+	{
+		.name = "test_call2",
+		.arg_sz = sizeof(struct dummy_offset),
+		.prm = {
+			.ins = test_call2_prog,
+			.nb_ins = RTE_DIM(test_call2_prog),
+			.prog_arg = {
+				.type = RTE_BPF_ARG_PTR,
+				.size = sizeof(struct dummy_offset),
+			},
+			.xsym = test_call2_xsym,
+			.nb_xsym = RTE_DIM(test_call2_xsym),
+		},
+		.prepare = test_store1_prepare,
+		.check_result = test_call2_check,
+		/* for now don't support function calls on 32 bit platform */
+		.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
+	},
 };
 
 static int
-- 
2.13.6

* Re: [dpdk-dev] [PATCH 0/3] bpf: extend validation of input BPF programs
  2018-06-08  8:42 [dpdk-dev] [PATCH 0/3] bpf: extend validation of input BPF programs Konstantin Ananyev
                   ` (2 preceding siblings ...)
  2018-06-08  8:42 ` [dpdk-dev] [PATCH 3/3] test/bpf: add new test-case for external function call Konstantin Ananyev
@ 2018-07-12  7:46 ` Thomas Monjalon
  3 siblings, 0 replies; 5+ messages in thread
From: Thomas Monjalon @ 2018-07-12  7:46 UTC (permalink / raw)
  To: Konstantin Ananyev; +Cc: dev

> Konstantin Ananyev (3):
>   bpf: add extra information for external symbol definitions
>   bpf: add extra validation for input BPF program
>   test/bpf: add new test-case for external function call

Applied, thanks
