* [PATCH 0/2] fix IP-in-IP tunnels recognition
From: Gregory Etelson @ 2024-02-29 16:05 UTC
To: dev; +Cc: getelson, mkashani, rasland, Ori Kam
Fix IP-in-IP tunnel validation and recognition in the mlx5 PMD.
Gregory Etelson (2):
net/mlx5: remove code duplications
net/mlx5: fix IP-in-IP tunnels recognition
drivers/net/mlx5/mlx5_flow_dv.c | 243 +++++++++++++++-----------------
1 file changed, 117 insertions(+), 126 deletions(-)
Acked-by: Ori Kam <orika@nvidia.com>
--
2.39.2
* [PATCH 1/2] net/mlx5: remove code duplications
From: Gregory Etelson @ 2024-02-29 16:05 UTC
To: dev
Cc: getelson, mkashani, rasland, stable, Ori Kam, Dariusz Sosnowski,
Viacheslav Ovsiienko, Suanming Mou, Matan Azrad, Yongseok Koh,
Shahaf Shuler
Remove duplicated code in DV L3 item validation and translation: the
per-item next-protocol extraction for IPv4, IPv6, and IPv6
fragment-extension items is factored into a single helper,
mlx5_flow_l3_next_protocol().
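For illustration, the consolidated rule distills to the standalone
sketch below. This is not the driver code itself: l3_next_proto() and
its want_mask/want_spec parameters are hypothetical stand-ins for
mlx5_flow_l3_next_protocol() and the MLX5_SET_MATCHER_HS_M/HS_V key
types seen in the diff.

#include <stdint.h>
#include <stddef.h>

/*
 * Hypothetical stand-in for mlx5_flow_l3_next_protocol().
 * want_mask/want_spec model the HS_M/HS_V matcher key types.
 */
static uint8_t
l3_next_proto(const uint8_t *spec, const uint8_t *mask,
	      int want_mask, int want_spec)
{
	/* Both sides given: masked spec, unless spec leaves it open. */
	if (spec != NULL && mask != NULL)
		return *spec ? (uint8_t)(*spec & *mask) : 0xff;
	/* Matcher-mask pass: take the mask side alone. */
	if (want_mask && mask != NULL)
		return *mask;
	/* Matcher-value pass: take the spec side alone. */
	if (want_spec && spec != NULL)
		return *spec;
	/* Nothing requested: reset for an inner layer. */
	return 0xff;
}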
Fixes: 3193c2494eea ("net/mlx5: fix L4 protocol validation")
Cc: stable@dpdk.org
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_dv.c | 151 +++++++++-----------------------
1 file changed, 43 insertions(+), 108 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 18f09b22be..fe0a06f364 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7488,6 +7488,40 @@ flow_dv_validate_item_flex(struct rte_eth_dev *dev,
return 0;
}
+static __rte_always_inline uint8_t
+mlx5_flow_l3_next_protocol(const struct rte_flow_item *l3_item,
+ enum MLX5_SET_MATCHER key_type)
+{
+#define MLX5_L3_NEXT_PROTOCOL(i, ms) \
+ ((i)->type == RTE_FLOW_ITEM_TYPE_IPV4 ? \
+ ((const struct rte_flow_item_ipv4 *)(i)->ms)->hdr.next_proto_id : \
+ (i)->type == RTE_FLOW_ITEM_TYPE_IPV6 ? \
+ ((const struct rte_flow_item_ipv6 *)(i)->ms)->hdr.proto : \
+ (i)->type == RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT ? \
+ ((const struct rte_flow_item_ipv6_frag_ext *)(i)->ms)->hdr.next_header :\
+ 0xff)
+
+ uint8_t next_protocol;
+
+ if (l3_item->mask != NULL && l3_item->spec != NULL) {
+ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, spec);
+ if (next_protocol)
+ next_protocol &= MLX5_L3_NEXT_PROTOCOL(l3_item, mask);
+ else
+ next_protocol = 0xff;
+ } else if (key_type == MLX5_SET_MATCHER_HS_M && l3_item->mask != NULL) {
+ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, mask);
+ } else if (key_type == MLX5_SET_MATCHER_HS_V && l3_item->spec != NULL) {
+ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, spec);
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
+ return next_protocol;
+
+#undef MLX5_L3_NEXT_PROTOCOL
+}
+
/**
* Validate IB BTH item.
*
@@ -7770,19 +7804,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- if (items->mask != NULL &&
- ((const struct rte_flow_item_ipv4 *)
- items->mask)->hdr.next_proto_id) {
- next_protocol =
- ((const struct rte_flow_item_ipv4 *)
- (items->spec))->hdr.next_proto_id;
- next_protocol &=
- ((const struct rte_flow_item_ipv4 *)
- (items->mask))->hdr.next_proto_id;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol
+ (items, (enum MLX5_SET_MATCHER)-1);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
mlx5_flow_tunnel_ip_check(items, next_protocol,
@@ -7796,22 +7819,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- if (items->mask != NULL &&
- ((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto) {
- item_ipv6_proto =
- ((const struct rte_flow_item_ipv6 *)
- items->spec)->hdr.proto;
- next_protocol =
- ((const struct rte_flow_item_ipv6 *)
- items->spec)->hdr.proto;
- next_protocol &=
- ((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol
+ (items, (enum MLX5_SET_MATCHER)-1);
break;
case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
ret = flow_dv_validate_item_ipv6_frag_ext(items,
@@ -7822,19 +7831,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
last_item = tunnel ?
MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
- if (items->mask != NULL &&
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->mask)->hdr.next_header) {
- next_protocol =
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->spec)->hdr.next_header;
- next_protocol &=
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->mask)->hdr.next_header;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol
+ (items, (enum MLX5_SET_MATCHER)-1);
break;
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_validate_item_tcp
@@ -13997,28 +13995,7 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
wks->priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- if (items->mask != NULL &&
- items->spec != NULL &&
- ((const struct rte_flow_item_ipv4 *)
- items->mask)->hdr.next_proto_id) {
- next_protocol =
- ((const struct rte_flow_item_ipv4 *)
- (items->spec))->hdr.next_proto_id;
- next_protocol &=
- ((const struct rte_flow_item_ipv4 *)
- (items->mask))->hdr.next_proto_id;
- } else if (key_type == MLX5_SET_MATCHER_HS_M &&
- items->mask != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv4 *)
- (items->mask))->hdr.next_proto_id;
- } else if (key_type == MLX5_SET_MATCHER_HS_V &&
- items->spec != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv4 *)
- (items->spec))->hdr.next_proto_id;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
mlx5_flow_tunnel_ip_check(items, next_protocol,
@@ -14028,56 +14005,14 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
wks->priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- if (items->mask != NULL &&
- items->spec != NULL &&
- ((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto) {
- next_protocol =
- ((const struct rte_flow_item_ipv6 *)
- items->spec)->hdr.proto;
- next_protocol &=
- ((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto;
- } else if (key_type == MLX5_SET_MATCHER_HS_M &&
- items->mask != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv6 *)
- (items->mask))->hdr.proto;
- } else if (key_type == MLX5_SET_MATCHER_HS_V &&
- items->spec != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv6 *)
- (items->spec))->hdr.proto;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
break;
case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
flow_dv_translate_item_ipv6_frag_ext
(key, items, tunnel, key_type);
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
- if (items->mask != NULL &&
- items->spec != NULL &&
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->mask)->hdr.next_header) {
- next_protocol =
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->spec)->hdr.next_header;
- next_protocol &=
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->mask)->hdr.next_header;
- } else if (key_type == MLX5_SET_MATCHER_HS_M &&
- items->mask != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv6_frag_ext *)
- (items->mask))->hdr.next_header;
- } else if (key_type == MLX5_SET_MATCHER_HS_V &&
- items->spec != NULL) {
- next_protocol = ((const struct rte_flow_item_ipv6_frag_ext *)
- (items->spec))->hdr.next_header;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
break;
case RTE_FLOW_ITEM_TYPE_TCP:
flow_dv_translate_item_tcp(key, items, tunnel, key_type);
--
2.39.2
* [PATCH 2/2] net/mlx5: fix IP-in-IP tunnels recognition
From: Gregory Etelson @ 2024-02-29 16:05 UTC
To: dev
Cc: getelson, mkashani, rasland, Ori Kam, Dariusz Sosnowski,
Viacheslav Ovsiienko, Suanming Mou, Matan Azrad, Yongseok Koh
The patch fixes IP-in-IP tunnel recognition for the following patterns
(a sketch of both forms in rte_flow terms follows the list):
/ [ipv4|ipv6] proto is [ipv4|ipv6] / end
/ [ipv4|ipv6] / [ipv4|ipv6] /
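In rte_flow terms the two forms look roughly as below. This is a
hedged sketch against the public rte_flow API, showing only the
IPv4-in-IPv4 case (the IPv6 variants are analogous); it is not a rule
the driver itself builds.

#include <netinet/in.h>
#include <rte_flow.h>

/* Form 1: explicit next-protocol match, "ipv4 proto is 4 / end". */
static const struct rte_flow_item_ipv4 ipip_spec = {
	.hdr.next_proto_id = IPPROTO_IPIP,	/* 4: IPv4-in-IPv4 */
};
static const struct rte_flow_item_ipv4 ipip_mask = {
	.hdr.next_proto_id = 0xff,
};
static const struct rte_flow_item pattern_proto[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.spec = &ipip_spec,
		.mask = &ipip_mask,
	},
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

/* Form 2: stacked L3 items, "ipv4 / ipv4 / end"; the inner item
 * alone implies the tunnel, without an explicit proto match. */
static const struct rte_flow_item pattern_stacked[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* outer */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* inner */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

Judging by the diff, the old code keyed tunnel detection only on the
next-protocol value, so the second form went unrecognized; the new
inner/outer detection covers both.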
Fixes: 3d69434113d1 ("net/mlx5: add Direct Verbs validation function")
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_dv.c | 104 ++++++++++++++++++++++++--------
1 file changed, 80 insertions(+), 24 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index fe0a06f364..92a5b7b503 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -275,21 +275,41 @@ struct field_modify_info modify_tcp[] = {
{0, 0, 0},
};
-static void
+enum mlx5_l3_tunnel_detection {
+ l3_tunnel_none,
+ l3_tunnel_outer,
+ l3_tunnel_inner
+};
+
+static enum mlx5_l3_tunnel_detection
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
- uint8_t next_protocol, uint64_t *item_flags,
- int *tunnel)
+ uint8_t next_protocol, uint64_t item_flags,
+ uint64_t *l3_tunnel_flag)
{
+ enum mlx5_l3_tunnel_detection td = l3_tunnel_none;
+
MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
item->type == RTE_FLOW_ITEM_TYPE_IPV6);
- if (next_protocol == IPPROTO_IPIP) {
- *item_flags |= MLX5_FLOW_LAYER_IPIP;
- *tunnel = 1;
- }
- if (next_protocol == IPPROTO_IPV6) {
- *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
- *tunnel = 1;
+ if ((item_flags & MLX5_FLOW_LAYER_OUTER_L3) == 0) {
+ switch (next_protocol) {
+ case IPPROTO_IPIP:
+ td = l3_tunnel_outer;
+ *l3_tunnel_flag = MLX5_FLOW_LAYER_IPIP;
+ break;
+ case IPPROTO_IPV6:
+ td = l3_tunnel_outer;
+ *l3_tunnel_flag = MLX5_FLOW_LAYER_IPV6_ENCAP;
+ break;
+ default:
+ break;
+ }
+ } else {
+ td = l3_tunnel_inner;
+ *l3_tunnel_flag = item->type == RTE_FLOW_ITEM_TYPE_IPV4 ?
+ MLX5_FLOW_LAYER_IPIP :
+ MLX5_FLOW_LAYER_IPV6_ENCAP;
}
+ return td;
}
static inline struct mlx5_hlist *
@@ -7718,6 +7738,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
is_root = (uint64_t)ret;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ enum mlx5_l3_tunnel_detection l3_tunnel_detection;
+ uint64_t l3_tunnel_flag;
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int type = items->type;
@@ -7795,8 +7817,16 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
vlan_m = items->mask;
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- mlx5_flow_tunnel_ip_check(items, next_protocol,
- &item_flags, &tunnel);
+ next_protocol = mlx5_flow_l3_next_protocol
+ (items, (enum MLX5_SET_MATCHER)-1);
+ l3_tunnel_detection =
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ item_flags,
+ &l3_tunnel_flag);
+ if (l3_tunnel_detection == l3_tunnel_inner) {
+ item_flags |= l3_tunnel_flag;
+ tunnel = 1;
+ }
ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
last_item, ether_type,
error);
@@ -7804,12 +7834,20 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- next_protocol = mlx5_flow_l3_next_protocol
- (items, (enum MLX5_SET_MATCHER)-1);
+ if (l3_tunnel_detection == l3_tunnel_outer)
+ item_flags |= l3_tunnel_flag;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- mlx5_flow_tunnel_ip_check(items, next_protocol,
- &item_flags, &tunnel);
+ next_protocol = mlx5_flow_l3_next_protocol
+ (items, (enum MLX5_SET_MATCHER)-1);
+ l3_tunnel_detection =
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ item_flags,
+ &l3_tunnel_flag);
+ if (l3_tunnel_detection == l3_tunnel_inner) {
+ item_flags |= l3_tunnel_flag;
+ tunnel = 1;
+ }
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
last_item,
ether_type,
@@ -7819,8 +7857,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- next_protocol = mlx5_flow_l3_next_protocol
- (items, (enum MLX5_SET_MATCHER)-1);
+ if (l3_tunnel_detection == l3_tunnel_outer)
+ item_flags |= l3_tunnel_flag;
break;
case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
ret = flow_dv_validate_item_ipv6_frag_ext(items,
@@ -13945,6 +13983,8 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
int tunnel = !!(wks->item_flags & MLX5_FLOW_LAYER_TUNNEL);
int item_type = items->type;
uint64_t last_item = wks->last_item;
+ enum mlx5_l3_tunnel_detection l3_tunnel_detection;
+ uint64_t l3_tunnel_flag;
int ret;
switch (item_type) {
@@ -13988,24 +14028,40 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
MLX5_FLOW_LAYER_OUTER_VLAN);
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
- mlx5_flow_tunnel_ip_check(items, next_protocol,
- &wks->item_flags, &tunnel);
+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
+ l3_tunnel_detection =
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ wks->item_flags,
+ &l3_tunnel_flag);
+ if (l3_tunnel_detection == l3_tunnel_inner) {
+ wks->item_flags |= l3_tunnel_flag;
+ tunnel = 1;
+ }
flow_dv_translate_item_ipv4(key, items, tunnel,
wks->group, key_type);
wks->priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
+ if (l3_tunnel_detection == l3_tunnel_outer)
+ wks->item_flags |= l3_tunnel_flag;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- mlx5_flow_tunnel_ip_check(items, next_protocol,
- &wks->item_flags, &tunnel);
+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
+ l3_tunnel_detection =
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ wks->item_flags,
+ &l3_tunnel_flag);
+ if (l3_tunnel_detection == l3_tunnel_inner) {
+ wks->item_flags |= l3_tunnel_flag;
+ tunnel = 1;
+ }
flow_dv_translate_item_ipv6(key, items, tunnel,
wks->group, key_type);
wks->priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- next_protocol = mlx5_flow_l3_next_protocol(items, key_type);
+ if (l3_tunnel_detection == l3_tunnel_outer)
+ wks->item_flags |= l3_tunnel_flag;
break;
case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
flow_dv_translate_item_ipv6_frag_ext
--
2.39.2
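For reference, the detection rule introduced above distills to this
simplified, self-contained sketch; the local names are illustrative
stand-ins, not the driver's mlx5_flow_tunnel_ip_check() itself.

#include <netinet/in.h>
#include <stdint.h>

enum l3_tunnel { L3_TUNNEL_NONE, L3_TUNNEL_OUTER, L3_TUNNEL_INNER };

/*
 * Mirror of the patched logic: the first L3 item opens a tunnel
 * only when its next-protocol announces an encapsulated IP header;
 * any L3 item matched after an outer L3 layer is an inner header.
 */
static enum l3_tunnel
classify_l3_item(int outer_l3_seen, uint8_t next_protocol)
{
	if (!outer_l3_seen) {
		if (next_protocol == IPPROTO_IPIP ||
		    next_protocol == IPPROTO_IPV6)
			return L3_TUNNEL_OUTER;
		return L3_TUNNEL_NONE;
	}
	return L3_TUNNEL_INNER;
}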
* RE: [PATCH 2/2] net/mlx5: fix IP-in-IP tunnels recognition
From: Raslan Darawsheh @ 2024-03-13 7:39 UTC
To: Gregory Etelson, dev
Cc: Maayan Kashani, Ori Kam, Dariusz Sosnowski, Slava Ovsiienko,
Suanming Mou, Matan Azrad, Yongseok Koh
Hi,
> -----Original Message-----
> From: Gregory Etelson <getelson@nvidia.com>
> Sent: Thursday, February 29, 2024 6:05 PM
> To: dev@dpdk.org
> Cc: Gregory Etelson <getelson@nvidia.com>; Maayan Kashani
> <mkashani@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>; Ori Kam
> <orika@nvidia.com>; Dariusz Sosnowski <dsosnowski@nvidia.com>; Slava
> Ovsiienko <viacheslavo@nvidia.com>; Suanming Mou
> <suanmingm@nvidia.com>; Matan Azrad <matan@nvidia.com>; Yongseok
> Koh <yskoh@mellanox.com>
> Subject: [PATCH 2/2] net/mlx5: fix IP-in-IP tunnels recognition
>
> The patch fixes IP-in-IP tunnel recognition for the following patterns
>
> / [ipv4|ipv6] proto is [ipv4|ipv6] / end
>
> / [ipv4|ipv6] / [ipv4|ipv6] /
>
> Fixes: 3d69434113d1 ("net/mlx5: add Direct Verbs validation function")
> Signed-off-by: Gregory Etelson <getelson@nvidia.com>
> Acked-by: Ori Kam <orika@nvidia.com>
Series applied to next-net-mlx,
Kindest regards
Raslan Darawsheh