* [PATCH] net/mlx5/hws: fix ESP header match in strict mode
@ 2025-08-04  5:05 Viacheslav Ovsiienko
  2025-08-08  7:30 ` Dariusz Sosnowski
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: Viacheslav Ovsiienko @ 2025-08-04  5:05 UTC (permalink / raw)
  To: dev; +Cc: rasland, matan, suanmingm, dsosnowski, stable

The pattern like "eth / ipv6 / esp / end" matched on any IPv6
packet in strict mode, because there was no implicit match on
IP.proto enforced.

This patch adds an implicit match on IP.proto with value 50 (ESP)
and an implicit match on UDP.dport with value 4500 for the
ESP-over-UDP case.

Fixes: 18ca4a4ec73a ("net/mlx5: support ESP SPI match and RSS hash")
Cc: stable@dpdk.org

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_definer.c | 38 ++++++++++++++++++++++++---
 1 file changed, 35 insertions(+), 3 deletions(-)

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 7464d95373..113feae291 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -14,6 +14,7 @@
 #define UDP_VXLAN_PORT 4789
 #define UDP_VXLAN_GPE_PORT 4790
 #define UDP_GTPU_PORT 2152
+#define UDP_ESP_PORT 4500
 #define UDP_PORT_MPLS 6635
 #define UDP_GENEVE_PORT 6081
 #define UDP_ROCEV2_PORT 4791
@@ -231,6 +232,8 @@ struct mlx5dr_definer_conv_data {
 	X(SET_BE16, nvgre_protocol, v->protocol, rte_flow_item_nvgre) \
 	X(SET_BE32P, nvgre_dw1, &v->tni[0], rte_flow_item_nvgre) \
 	X(SET, meter_color, rte_col_2_mlx5_col(v->color), rte_flow_item_meter_color) \
+	X(SET, ipsec_protocol, IPPROTO_ESP, rte_flow_item_esp) \
+	X(SET, ipsec_udp_port, UDP_ESP_PORT, rte_flow_item_esp) \
 	X(SET_BE32, ipsec_spi, v->hdr.spi, rte_flow_item_esp) \
 	X(SET_BE32, ipsec_sequence_number, v->hdr.seq, rte_flow_item_esp) \
 	X(SET, ib_l4_udp_port, UDP_ROCEV2_PORT, rte_flow_item_ib_bth) \
@@ -2481,7 +2484,9 @@ mlx5dr_definer_conv_item_meter_color(struct mlx5dr_definer_conv_data *cd,
 }
 
 static struct mlx5dr_definer_fc *
-mlx5dr_definer_get_flex_parser_fc(struct mlx5dr_definer_conv_data *cd, uint32_t byte_off)
+mlx5dr_definer_get_flex_parser_fc(struct mlx5dr_definer_conv_data *cd,
+				  uint32_t byte_off,
+				  int item_idx)
 {
 	uint32_t byte_off_fp7 = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_7);
 	uint32_t byte_off_fp0 = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
@@ -2493,6 +2498,33 @@ mlx5dr_definer_get_flex_parser_fc(struct mlx5dr_definer_conv_data *cd, uint32_t
 		rte_errno = EINVAL;
 		return NULL;
 	}
+
+	/* To match on ESP we must match on ip_protocol and optionally on l4_dport */
+	if (!cd->relaxed) {
+		bool over_udp;
+
+		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];
+		over_udp = fc->tag_set == &mlx5dr_definer_udp_protocol_set;
+
+		if (over_udp) {
+			fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)];
+			if (!fc->tag_set) {
+				fc->item_idx = item_idx;
+				fc->tag_mask_set = &mlx5dr_definer_ones_set;
+				fc->tag_set = &mlx5dr_definer_ipsec_udp_port_set;
+				DR_CALC_SET(fc, eth_l4, destination_port, false);
+			}
+		} else {
+			fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];
+			if (!fc->tag_set) {
+				fc->item_idx = item_idx;
+				fc->tag_set = &mlx5dr_definer_ipsec_protocol_set;
+				fc->tag_mask_set = &mlx5dr_definer_ones_set;
+				DR_CALC_SET(fc, eth_l3, protocol_next_header, false);
+			}
+		}
+	}
+
 	idx = (byte_off_fp0 - byte_off) / (sizeof(uint32_t));
 	fname += (enum mlx5dr_definer_fname)idx;
 	fc = &cd->fc[fname];
@@ -2544,7 +2576,7 @@ mlx5dr_definer_conv_item_ipv6_routing_ext(struct mlx5dr_definer_conv_data *cd,
 	if (m->hdr.next_hdr || m->hdr.type || m->hdr.segments_left) {
 		byte_off = flow_hw_get_srh_flex_parser_byte_off_from_ctx(cd->ctx);
-		fc = mlx5dr_definer_get_flex_parser_fc(cd, byte_off);
+		fc = mlx5dr_definer_get_flex_parser_fc(cd, byte_off, item_idx);
 		if (!fc)
 			return rte_errno;
@@ -2666,7 +2698,7 @@ mlx5dr_definer_conv_item_ecpri(struct mlx5dr_definer_conv_data *cd,
 		if (!mask)
 			continue;
 		mask = htobe32(mask);
-		fc = mlx5dr_definer_get_flex_parser_fc(cd, byte_off[i]);
+		fc = mlx5dr_definer_get_flex_parser_fc(cd, byte_off[i], item_idx);
 		if (!fc)
 			return rte_errno;
-- 
2.34.1
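For reference, a minimal sketch of the kind of rule the quoted "eth / ipv6 / esp / end"
pattern describes, written against the generic rte_flow API. This is illustrative only
and not part of the patch; the port, group, queue index, and SPI values are arbitrary
assumptions.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Create "eth / ipv6 / esp spi is 1000 / end" with a queue action.
 * Port, group, queue, and SPI values are arbitrary for illustration. */
static struct rte_flow *
create_esp_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1, .group = 1 };
	struct rte_flow_item_esp esp_spec = {
		.hdr = { .spi = RTE_BE32(1000) },
	};
	struct rte_flow_item_esp esp_mask = {
		.hdr = { .spi = RTE_BE32(UINT32_MAX) },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
		{ .type = RTE_FLOW_ITEM_TYPE_ESP,
		  .spec = &esp_spec, .mask = &esp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

Before the fix, installing such a rule with strict (non-relaxed) matching could match
any IPv6 packet; with the fix the PMD adds the implicit IPv6 next-header == 50 (ESP)
condition described in the commit message.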
* Re: [PATCH] net/mlx5/hws: fix ESP header match in strict mode
  2025-08-04  5:05 [PATCH] net/mlx5/hws: fix ESP header match in strict mode Viacheslav Ovsiienko
@ 2025-08-08  7:30 ` Dariusz Sosnowski
  2025-08-18  6:33 ` Raslan Darawsheh
  2025-09-09  6:28 ` [PATCH v2 1/3] net/mlx5: " Maayan Kashani
  2 siblings, 0 replies; 8+ messages in thread
From: Dariusz Sosnowski @ 2025-08-08  7:30 UTC (permalink / raw)
  To: Viacheslav Ovsiienko; +Cc: dev, rasland, matan, suanmingm, stable

On Mon, Aug 04, 2025 at 08:05:14AM +0300, Viacheslav Ovsiienko wrote:
> The pattern like "eth / ipv6 / esp / end" matched on any IPv6
> packet in strict mode, because there was no implicit match on
> IP.proto enforced.
>
> This patch adds an implicit match on IP.proto with value 50 (ESP)
> and an implicit match on UDP.dport with value 4500 for the
> ESP-over-UDP case.
>
> Fixes: 18ca4a4ec73a ("net/mlx5: support ESP SPI match and RSS hash")
> Cc: stable@dpdk.org
>
> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>

Best regards,
Dariusz Sosnowski
* Re: [PATCH] net/mlx5/hws: fix ESP header match in strict mode
  2025-08-04  5:05 [PATCH] net/mlx5/hws: fix ESP header match in strict mode Viacheslav Ovsiienko
  2025-08-08  7:30 ` Dariusz Sosnowski
@ 2025-08-18  6:33 ` Raslan Darawsheh
  2025-09-08 12:12   ` Maayan Kashani
  2025-09-09  6:28 ` [PATCH v2 1/3] net/mlx5: " Maayan Kashani
  2 siblings, 1 reply; 8+ messages in thread
From: Raslan Darawsheh @ 2025-08-18  6:33 UTC (permalink / raw)
  To: Viacheslav Ovsiienko, dev; +Cc: matan, suanmingm, dsosnowski, stable

Hi,

On 04/08/2025 8:05 AM, Viacheslav Ovsiienko wrote:
> The pattern like "eth / ipv6 / esp / end" matched on any IPv6
> packet in strict mode, because there was no implicit match on
> IP.proto enforced.
>
> This patch adds an implicit match on IP.proto with value 50 (ESP)
> and an implicit match on UDP.dport with value 4500 for the
> ESP-over-UDP case.
>
> Fixes: 18ca4a4ec73a ("net/mlx5: support ESP SPI match and RSS hash")
> Cc: stable@dpdk.org
>
> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

Patch applied to next-net-mlx,

Kindest regards
Raslan Darawsheh
* RE: [PATCH] net/mlx5/hws: fix ESP header match in strict mode
  2025-08-18  6:33 ` Raslan Darawsheh
@ 2025-09-08 12:12   ` Maayan Kashani
  2025-09-08 12:26     ` Raslan Darawsheh
  0 siblings, 1 reply; 8+ messages in thread
From: Maayan Kashani @ 2025-09-08 12:12 UTC (permalink / raw)
  To: Raslan Darawsheh, Slava Ovsiienko, dev
  Cc: Matan Azrad, Suanming Mou, Dariusz Sosnowski, stable

Please drop this patch,
the merge was incorrect and the code is incorrect.

Regards,
Maayan Kashani

> -----Original Message-----
> From: Raslan Darawsheh <rasland@nvidia.com>
> Sent: Monday, 18 August 2025 9:33
> To: Slava Ovsiienko <viacheslavo@nvidia.com>; dev@dpdk.org
> Cc: Matan Azrad <matan@nvidia.com>; Suanming Mou
> <suanmingm@nvidia.com>; Dariusz Sosnowski <dsosnowski@nvidia.com>;
> stable@dpdk.org
> Subject: Re: [PATCH] net/mlx5/hws: fix ESP header match in strict mode
>
> External email: Use caution opening links or attachments
>
> Hi,
>
> On 04/08/2025 8:05 AM, Viacheslav Ovsiienko wrote:
> > The pattern like "eth / ipv6 / esp / end" matched on any IPv6 packet
> > in strict mode, because there was no implicit match on IP.proto
> > enforced.
> >
> > This patch adds an implicit match on IP.proto with value 50 (ESP) and
> > an implicit match on UDP.dport with value 4500 for the ESP-over-UDP
> > case.
> >
> > Fixes: 18ca4a4ec73a ("net/mlx5: support ESP SPI match and RSS hash")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
>
> Patch applied to next-net-mlx,
>
> Kindest regards
> Raslan Darawsheh
* Re: [PATCH] net/mlx5/hws: fix ESP header match in strict mode
  2025-09-08 12:12   ` Maayan Kashani
@ 2025-09-08 12:26     ` Raslan Darawsheh
  0 siblings, 0 replies; 8+ messages in thread
From: Raslan Darawsheh @ 2025-09-08 12:26 UTC (permalink / raw)
  To: Maayan Kashani, Slava Ovsiienko, dev
  Cc: Matan Azrad, Suanming Mou, Dariusz Sosnowski, stable

On 08/09/2025 3:12 PM, Maayan Kashani wrote:
> Please drop this patch,
> the merge was incorrect and the code is incorrect.

There isn't anything wrong with the merge; the patch was merged as is.
I'll drop this version of the patch and you can send a v2 for it.

Kindest regards
Raslan Darawsheh
* [PATCH v2 1/3] net/mlx5: fix ESP header match in strict mode
  2025-08-04  5:05 [PATCH] net/mlx5/hws: fix ESP header match in strict mode Viacheslav Ovsiienko
  2025-08-08  7:30 ` Dariusz Sosnowski
  2025-08-18  6:33 ` Raslan Darawsheh
@ 2025-09-09  6:28 ` Maayan Kashani
  2025-09-09  6:28   ` [PATCH v2 2/3] net/mlx5: fix ESP item validation to match on seqnum Maayan Kashani
  2025-09-09  6:28   ` [PATCH v2 3/3] net/mlx5: fix ESP header match after UDP for group 0 Maayan Kashani
  2 siblings, 2 replies; 8+ messages in thread
From: Maayan Kashani @ 2025-09-09  6:28 UTC (permalink / raw)
  To: dev
  Cc: mkashani, dsosnowski, rasland, Viacheslav Ovsiienko, stable,
	Matan Azrad, Bing Zhao, Ori Kam, Suanming Mou, Hamdan Igbaria

From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

The pattern like "eth / ipv6 / esp / end" matched on any IPv6
packet in strict mode, because there was no implicit match on
IP.proto enforced.

This patch adds an implicit match on IP.proto with value 50 (ESP)
and an implicit match on UDP.dport with value 4500 for the
ESP-over-UDP case.

Fixes: 81cf20a25abf ("net/mlx5/hws: support match on ESP item")
Cc: stable@dpdk.org

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_definer.c | 29 +++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 7464d953739..02eba861bc5 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -14,6 +14,7 @@
 #define UDP_VXLAN_PORT 4789
 #define UDP_VXLAN_GPE_PORT 4790
 #define UDP_GTPU_PORT 2152
+#define UDP_ESP_PORT 4500
 #define UDP_PORT_MPLS 6635
 #define UDP_GENEVE_PORT 6081
 #define UDP_ROCEV2_PORT 4791
@@ -231,6 +232,8 @@ struct mlx5dr_definer_conv_data {
 	X(SET_BE16, nvgre_protocol, v->protocol, rte_flow_item_nvgre) \
 	X(SET_BE32P, nvgre_dw1, &v->tni[0], rte_flow_item_nvgre) \
 	X(SET, meter_color, rte_col_2_mlx5_col(v->color), rte_flow_item_meter_color) \
+	X(SET, ipsec_protocol, IPPROTO_ESP, rte_flow_item_esp) \
+	X(SET, ipsec_udp_port, UDP_ESP_PORT, rte_flow_item_esp) \
 	X(SET_BE32, ipsec_spi, v->hdr.spi, rte_flow_item_esp) \
 	X(SET_BE32, ipsec_sequence_number, v->hdr.seq, rte_flow_item_esp) \
 	X(SET, ib_l4_udp_port, UDP_ROCEV2_PORT, rte_flow_item_ib_bth) \
@@ -2930,6 +2933,32 @@ mlx5dr_definer_conv_item_esp(struct mlx5dr_definer_conv_data *cd,
 	const struct rte_flow_item_esp *m = item->mask;
 	struct mlx5dr_definer_fc *fc;
 
+	/* To match on ESP we must match on ip_protocol and optionally on l4_dport */
+	if (!cd->relaxed) {
+		bool over_udp;
+
+		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];
+		over_udp = fc->tag_set == &mlx5dr_definer_udp_protocol_set;
+
+		if (over_udp) {
+			fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)];
+			if (!fc->tag_set) {
+				fc->item_idx = item_idx;
+				fc->tag_mask_set = &mlx5dr_definer_ones_set;
+				fc->tag_set = &mlx5dr_definer_ipsec_udp_port_set;
+				DR_CALC_SET(fc, eth_l4, destination_port, false);
+			}
+		} else {
+			fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];
+			if (!fc->tag_set) {
+				fc->item_idx = item_idx;
+				fc->tag_set = &mlx5dr_definer_ipsec_protocol_set;
+				fc->tag_mask_set = &mlx5dr_definer_ones_set;
+				DR_CALC_SET(fc, eth_l3, protocol_next_header, false);
+			}
+		}
+	}
+
 	if (!m)
 		return 0;
 	if (m->hdr.spi) {
-- 
2.21.0
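The "strict mode" referred to in this series corresponds to the non-relaxed matching
mode of the rte_flow template API (relaxed_matching == 0 in the pattern template
attributes). A minimal sketch of a pattern template created in that mode follows; it
is illustrative only and not part of the patch, and it assumes the port has already
been configured for the template API with rte_flow_configure().

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Pattern template for "eth / ipv6 / esp" with an SPI mask, created in
 * strict (non-relaxed) mode so the PMD may add implicit matches. */
static struct rte_flow_pattern_template *
create_esp_pattern_template(uint16_t port_id, struct rte_flow_error *err)
{
	const struct rte_flow_pattern_template_attr attr = {
		.relaxed_matching = 0,	/* strict mode */
		.ingress = 1,
	};
	const struct rte_flow_item_esp esp_mask = {
		.hdr = { .spi = RTE_BE32(UINT32_MAX) },
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
		{ .type = RTE_FLOW_ITEM_TYPE_ESP, .mask = &esp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	return rte_flow_pattern_template_create(port_id, &attr, pattern, err);
}

With relaxed_matching set to 0 the PMD is allowed to add implicit conditions, which
is what the hunk in mlx5dr_definer_conv_item_esp() above does for the IP protocol
and, in the ESP-over-UDP case, for the UDP destination port.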
* [PATCH v2 2/3] net/mlx5: fix ESP item validation to match on seqnum
  2025-09-09  6:28 ` [PATCH v2 1/3] net/mlx5: " Maayan Kashani
@ 2025-09-09  6:28   ` Maayan Kashani
  2025-09-09  6:28   ` [PATCH v2 3/3] net/mlx5: fix ESP header match after UDP for group 0 Maayan Kashani
  1 sibling, 0 replies; 8+ messages in thread
From: Maayan Kashani @ 2025-09-09  6:28 UTC (permalink / raw)
  To: dev
  Cc: mkashani, dsosnowski, rasland, Viacheslav Ovsiienko, stable,
	Matan Azrad, Bing Zhao, Ori Kam, Suanming Mou, Raja Zidane

From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

The match on the ESP sequence number is supported by the hardware
steering implementation but was rejected in the validation routine
shared by all steering engines.

This patch allows validation to pass with a requested match on the
ESP sequence number for the hardware steering engine.

Fixes: fb96caa56aab ("net/mlx5: support ESP item on Windows")
Cc: stable@dpdk.org

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_flow_os.c   | 10 +++++++++-
 drivers/net/mlx5/linux/mlx5_flow_os.h   |  3 +++
 drivers/net/mlx5/mlx5_flow_dv.c         |  6 ++----
 drivers/net/mlx5/mlx5_flow_hw.c         |  5 ++---
 drivers/net/mlx5/mlx5_flow_verbs.c      |  6 ++----
 drivers/net/mlx5/windows/mlx5_flow_os.c | 10 +++++++++-
 drivers/net/mlx5/windows/mlx5_flow_os.h |  3 +++
 7 files changed, 30 insertions(+), 13 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_flow_os.c b/drivers/net/mlx5/linux/mlx5_flow_os.c
index af8c02c38b8..777125e9a87 100644
--- a/drivers/net/mlx5/linux/mlx5_flow_os.c
+++ b/drivers/net/mlx5/linux/mlx5_flow_os.c
@@ -18,6 +18,7 @@ mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
 			       const struct rte_flow_item *item,
 			       uint64_t item_flags,
 			       uint8_t target_protocol,
+			       bool allow_seq,
 			       struct rte_flow_error *error)
 {
 	const struct rte_flow_item_esp *mask = item->mask;
@@ -26,6 +27,12 @@ mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
 				      MLX5_FLOW_LAYER_OUTER_L3;
 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
 				      MLX5_FLOW_LAYER_OUTER_L4;
+	static const struct rte_flow_item_esp mlx5_flow_item_esp_mask = {
+		.hdr = {
+			.spi = RTE_BE32(0xffffffff),
+			.seq = RTE_BE32(0xffffffff),
+		},
+	};
 	int ret;
 
 	if (!mlx5_hws_active(dev)) {
@@ -47,7 +54,8 @@ mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
 		mask = &rte_flow_item_esp_mask;
 	ret = mlx5_flow_item_acceptable
 		(dev, item, (const uint8_t *)mask,
-		 (const uint8_t *)&rte_flow_item_esp_mask,
+		 allow_seq ? (const uint8_t *)&mlx5_flow_item_esp_mask :
+			     (const uint8_t *)&rte_flow_item_esp_mask,
 		 sizeof(struct rte_flow_item_esp),
 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
 	if (ret < 0)
diff --git a/drivers/net/mlx5/linux/mlx5_flow_os.h b/drivers/net/mlx5/linux/mlx5_flow_os.h
index 35b5871ab91..21a2ed5bec0 100644
--- a/drivers/net/mlx5/linux/mlx5_flow_os.h
+++ b/drivers/net/mlx5/linux/mlx5_flow_os.h
@@ -514,6 +514,8 @@ mlx5_os_flow_dr_sync_domain(void *domain, uint32_t flags)
  *   Bit-fields that holds the items detected until now.
  * @param[in] target_protocol
  *   The next protocol in the previous item.
+ * @param[in] allow_seq
+ *   The match on sequence number is supported.
  * @param[out] error
  *   Pointer to error structure.
  *
@@ -525,6 +527,7 @@ mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
 			       const struct rte_flow_item *item,
 			       uint64_t item_flags,
 			       uint8_t target_protocol,
+			       bool allow_seq,
 			       struct rte_flow_error *error);
 
 /**
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index abfd54da1a9..18d0d293770 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7859,10 +7859,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 		case RTE_FLOW_ITEM_TYPE_VOID:
 			break;
 		case RTE_FLOW_ITEM_TYPE_ESP:
-			ret = mlx5_flow_os_validate_item_esp(dev, items,
-							     item_flags,
-							     next_protocol,
-							     error);
+			ret = mlx5_flow_os_validate_item_esp(dev, items, item_flags,
+							     next_protocol, false, error);
 			if (ret < 0)
 				return ret;
 			last_item = MLX5_FLOW_ITEM_ESP;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index c84ae726a74..2ca40b41465 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -8818,9 +8818,8 @@ __flow_hw_pattern_validate(struct rte_eth_dev *dev,
 			last_item = MLX5_FLOW_ITEM_QUOTA;
 			break;
 		case RTE_FLOW_ITEM_TYPE_ESP:
-			ret = mlx5_flow_os_validate_item_esp(dev, item,
-							     *item_flags, 0xff,
-							     error);
+			ret = mlx5_flow_os_validate_item_esp(dev, item, *item_flags,
+							     0xff, true, error);
 			if (ret < 0)
 				return ret;
 			last_item = MLX5_FLOW_ITEM_ESP;
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 5b4a4eda3bb..67d199ce15e 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1332,10 +1332,8 @@ flow_verbs_validate(struct rte_eth_dev *dev,
 		switch (items->type) {
 #ifdef HAVE_IBV_FLOW_SPEC_ESP
 		case RTE_FLOW_ITEM_TYPE_ESP:
-			ret = mlx5_flow_os_validate_item_esp(dev, items,
-							     item_flags,
-							     next_protocol,
-							     error);
+			ret = mlx5_flow_os_validate_item_esp(dev, items, item_flags,
+							     next_protocol, false, error);
 			if (ret < 0)
 				return ret;
 			last_item = MLX5_FLOW_ITEM_ESP;
diff --git a/drivers/net/mlx5/windows/mlx5_flow_os.c b/drivers/net/mlx5/windows/mlx5_flow_os.c
index bf93da9f1e1..7a625fb880a 100644
--- a/drivers/net/mlx5/windows/mlx5_flow_os.c
+++ b/drivers/net/mlx5/windows/mlx5_flow_os.c
@@ -428,6 +428,7 @@ mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
 			       const struct rte_flow_item *item,
 			       uint64_t item_flags,
 			       uint8_t target_protocol,
+			       bool allow_seq,
 			       struct rte_flow_error *error)
 {
 	const struct rte_flow_item_esp *mask = item->mask;
@@ -437,6 +438,12 @@ mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
 				      MLX5_FLOW_LAYER_OUTER_L3;
 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
 				      MLX5_FLOW_LAYER_OUTER_L4;
+	static const struct rte_flow_item_esp mlx5_flow_item_esp_mask = {
+		.hdr = {
+			.spi = RTE_BE32(0xffffffff),
+			.seq = RTE_BE32(0xffffffff),
+		},
+	};
 	int ret;
 
 	if (!(item_flags & l3m))
@@ -461,7 +468,8 @@ mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
 			 " supported on Windows");
 	ret = mlx5_flow_item_acceptable
 		(dev, item, (const uint8_t *)mask,
-		 (const uint8_t *)&rte_flow_item_esp_mask,
+		 allow_seq ? (const uint8_t *)&mlx5_flow_item_esp_mask :
+			     (const uint8_t *)&rte_flow_item_esp_mask,
 		 sizeof(struct rte_flow_item_esp),
 		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
 	if (ret < 0)
diff --git a/drivers/net/mlx5/windows/mlx5_flow_os.h b/drivers/net/mlx5/windows/mlx5_flow_os.h
index 36edc3d5328..2cd4e953256 100644
--- a/drivers/net/mlx5/windows/mlx5_flow_os.h
+++ b/drivers/net/mlx5/windows/mlx5_flow_os.h
@@ -461,6 +461,8 @@ int mlx5_flow_os_destroy_flow(void *drv_flow_ptr);
  *   Bit-fields that holds the items detected until now.
  * @param[in] target_protocol
  *   The next protocol in the previous item.
+ * @param[in] allow_seq
+ *   The match on sequence number is supported.
  * @param[out] error
  *   Pointer to error structure.
 *
@@ -472,6 +474,7 @@ mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
 			       const struct rte_flow_item *item,
 			       uint64_t item_flags,
 			       uint8_t target_protocol,
+			       bool allow_seq,
 			       struct rte_flow_error *error);
 
 /**
-- 
2.21.0
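For illustration, an ESP item whose mask covers both the SPI and the sequence number;
this snippet is not part of the patch and the values are arbitrary. With the change
above, such an item passes validation on the hardware steering path (allow_seq ==
true), while the DV and Verbs paths keep accepting only the SPI-only default mask.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* ESP item matching SPI 0x1234 and sequence number 1 exactly. */
static const struct rte_flow_item_esp esp_spec = {
	.hdr = {
		.spi = RTE_BE32(0x1234),
		.seq = RTE_BE32(1),
	},
};
static const struct rte_flow_item_esp esp_seq_mask = {
	.hdr = {
		.spi = RTE_BE32(UINT32_MAX),
		.seq = RTE_BE32(UINT32_MAX),
	},
};
static const struct rte_flow_item esp_item = {
	.type = RTE_FLOW_ITEM_TYPE_ESP,
	.spec = &esp_spec,
	.mask = &esp_seq_mask,
};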
* [PATCH v2 3/3] net/mlx5: fix ESP header match after UDP for group 0
  2025-09-09  6:28 ` [PATCH v2 1/3] net/mlx5: fix ESP header match in strict mode Maayan Kashani
  2025-09-09  6:28   ` [PATCH v2 2/3] net/mlx5: fix ESP item validation to match on seqnum Maayan Kashani
@ 2025-09-09  6:28   ` Maayan Kashani
  1 sibling, 0 replies; 8+ messages in thread
From: Maayan Kashani @ 2025-09-09  6:28 UTC (permalink / raw)
  To: dev
  Cc: mkashani, dsosnowski, rasland, Viacheslav Ovsiienko, stable,
	Matan Azrad, Bing Zhao, Ori Kam, Suanming Mou, Raja Zidane

From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

The ESP item translation routine always forced the match on the IP
next protocol to be 50 (ESP). This prevented matching ESP packets
over UDP.

The patch checks whether a UDP header is expected and, in that case,
forces a match on UDP destination port 4500 if it is not already set
by the caller.

Fixes: 18ca4a4ec73a ("net/mlx5: support ESP SPI match and RSS hash")
Cc: stable@dpdk.org

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_flow_os.c |  6 -----
 drivers/net/mlx5/mlx5_flow.h          |  3 +++
 drivers/net/mlx5/mlx5_flow_dv.c       | 34 ++++++++++++++++-----------
 3 files changed, 23 insertions(+), 20 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_flow_os.c b/drivers/net/mlx5/linux/mlx5_flow_os.c
index 777125e9a87..f5eee46e44b 100644
--- a/drivers/net/mlx5/linux/mlx5_flow_os.c
+++ b/drivers/net/mlx5/linux/mlx5_flow_os.c
@@ -25,8 +25,6 @@ mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
 				      MLX5_FLOW_LAYER_OUTER_L3;
-	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
-				      MLX5_FLOW_LAYER_OUTER_L4;
 	static const struct rte_flow_item_esp mlx5_flow_item_esp_mask = {
 		.hdr = {
 			.spi = RTE_BE32(0xffffffff),
@@ -41,10 +39,6 @@ mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
 					  "L3 is mandatory to filter on L4");
 	}
-	if (item_flags & l4m)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
-					  "multiple L4 layers not supported");
 	if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 367dacc2779..ff617060549 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -489,6 +489,9 @@ struct mlx5_mirror {
 /* UDP port numbers for GENEVE. */
 #define MLX5_UDP_PORT_GENEVE 6081
 
+/* UDP port numbers for ESP. */
+#define MLX5_UDP_PORT_ESP 4500
+
 /* Lowest priority indicator. */
 #define MLX5_FLOW_LOWEST_PRIO_INDICATOR ((uint32_t)-1)
 
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 18d0d293770..bcce1597e2d 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -9713,29 +9713,35 @@ flow_dv_translate_item_tcp(void *key, const struct rte_flow_item *item,
  */
 static void
 flow_dv_translate_item_esp(void *key, const struct rte_flow_item *item,
-			   int inner, uint32_t key_type)
+			   int inner, uint32_t key_type, uint64_t item_flags)
 {
 	const struct rte_flow_item_esp *esp_m;
 	const struct rte_flow_item_esp *esp_v;
 	void *headers_v;
 	char *spi_v;
+	bool over_udp = item_flags & (inner ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+				      MLX5_FLOW_LAYER_OUTER_L4_UDP);
 
 	headers_v = inner ? MLX5_ADDR_OF(fte_match_param, key, inner_headers) :
-		    MLX5_ADDR_OF(fte_match_param, key, outer_headers);
-	if (key_type & MLX5_SET_MATCHER_M)
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-			 ip_protocol, 0xff);
-	else
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-			 ip_protocol, IPPROTO_ESP);
+		    MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+	if (key_type & MLX5_SET_MATCHER_M) {
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 0xff);
+		if (over_udp && !MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport))
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, 0xFFFF);
+	} else {
+		if (!over_udp)
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ESP);
+		else
+			if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport))
+				MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+					 MLX5_UDP_PORT_ESP);
+	}
 	if (MLX5_ITEM_VALID(item, key_type))
 		return;
-	MLX5_ITEM_UPDATE(item, key_type, esp_v, esp_m,
-			 &rte_flow_item_esp_mask);
+	MLX5_ITEM_UPDATE(item, key_type, esp_v, esp_m, &rte_flow_item_esp_mask);
 	headers_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
-	spi_v = inner ? MLX5_ADDR_OF(fte_match_set_misc, headers_v,
-			inner_esp_spi) : MLX5_ADDR_OF(fte_match_set_misc
-			, headers_v, outer_esp_spi);
+	spi_v = inner ? MLX5_ADDR_OF(fte_match_set_misc, headers_v, inner_esp_spi) :
+			MLX5_ADDR_OF(fte_match_set_misc, headers_v, outer_esp_spi);
 	*(uint32_t *)spi_v = esp_m->hdr.spi & esp_v->hdr.spi;
 }
 
@@ -14224,7 +14230,7 @@ flow_dv_translate_items(struct rte_eth_dev *dev,
 
 	switch (item_type) {
 	case RTE_FLOW_ITEM_TYPE_ESP:
-		flow_dv_translate_item_esp(key, items, tunnel, key_type);
+		flow_dv_translate_item_esp(key, items, tunnel, key_type, wks->item_flags);
 		wks->priority = MLX5_PRIORITY_MAP_L4;
 		last_item = MLX5_FLOW_ITEM_ESP;
 		break;
-- 
2.21.0
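For illustration, an ESP-over-UDP (NAT-T style) pattern of the kind this patch is
about. This is a sketch only, not part of the patch; the attribute and value choices
are assumptions. The UDP item leaves the destination port unset, so the translation
changed above would implicitly match UDP destination port 4500 on group 0; a caller
may still provide an explicit destination port instead.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* "eth / ipv4 / udp / esp spi is 0x2000" on group 0 (DV translation path). */
static const struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
static const struct rte_flow_item_esp esp_spec = {
	.hdr = { .spi = RTE_BE32(0x2000) },
};
static const struct rte_flow_item_esp esp_mask = {
	.hdr = { .spi = RTE_BE32(UINT32_MAX) },
};
static const struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },	/* no destination port given */
	{ .type = RTE_FLOW_ITEM_TYPE_ESP, .spec = &esp_spec, .mask = &esp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};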