* [dpdk-dev] [PATCH] net/ice: fix function pointer in multi-process
@ 2021-10-21 3:35 dapengx.yu
2021-10-22 7:55 ` [dpdk-dev] [dpdk-stable] " Wang, Haiyue
2021-10-22 8:57 ` [dpdk-dev] [PATCH v2] " dapengx.yu
0 siblings, 2 replies; 12+ messages in thread
From: dapengx.yu @ 2021-10-21 3:35 UTC (permalink / raw)
To: Qiming Yang, Qi Zhang; +Cc: dev, Dapeng Yu, stable
From: Dapeng Yu <dapengx.yu@intel.com>
Sharing a function pointer between the primary and secondary processes may
cause the secondary process to crash. This patch fixes it.
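As a rough illustration of the failure mode (not the driver code itself;
the struct and function names below are made up): the Rx queue structure
lives in memory mapped by both processes, but each process loads the PMD
at its own address, so a function pointer written by the primary is
meaningless in the secondary.

/*
 * Minimal sketch of the problem, assuming a queue structure placed in
 * memory shared between DPDK processes (e.g. a hugepage memzone).
 * The names shared_rxq/primary_handler/... are illustrative only.
 */
struct shared_rxq {
	void (*handler)(void);          /* written by the primary process */
};

static void primary_handler(void)
{
	/* parse descriptor fields ... */
}

/* Primary process: stores an address that is only valid in its own
 * address space. */
void primary_setup(struct shared_rxq *rxq)
{
	rxq->handler = primary_handler;
}

/* Secondary process: the PMD is mapped at a different address here, so
 * calling through the shared pointer jumps to an arbitrary location. */
void secondary_rx(struct shared_rxq *rxq)
{
	rxq->handler();                 /* undefined behaviour, often a crash */
}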
Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
Cc: stable@dpdk.org
Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
---
drivers/net/ice/ice_rxtx.c | 35 +++++++++++++++++++++++------------
drivers/net/ice/ice_rxtx.h | 2 +-
2 files changed, 24 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index ff362c21d9..7bb0ac4de3 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -205,51 +205,62 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
#endif
}
+static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields[] = {
+ [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
+ [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
+ [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
+};
+
void
ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
{
switch (rxdid) {
case ICE_RXDID_COMMS_AUX_VLAN:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
+ rxq->rxdid = ICE_RXDID_COMMS_AUX_VLAN;
break;
case ICE_RXDID_COMMS_AUX_IPV4:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
+ rxq->rxdid = ICE_RXDID_COMMS_AUX_IPV4;
break;
case ICE_RXDID_COMMS_AUX_IPV6:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
+ rxq->rxdid = ICE_RXDID_COMMS_AUX_IPV6;
break;
case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
+ rxq->rxdid = ICE_RXDID_COMMS_AUX_IPV6_FLOW;
break;
case ICE_RXDID_COMMS_AUX_TCP:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
+ rxq->rxdid = ICE_RXDID_COMMS_AUX_TCP;
break;
case ICE_RXDID_COMMS_AUX_IP_OFFSET:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
+ rxq->rxdid = ICE_RXDID_COMMS_AUX_IP_OFFSET;
break;
case ICE_RXDID_COMMS_GENERIC:
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
+ rxq->rxdid = ICE_RXDID_COMMS_GENERIC;
break;
case ICE_RXDID_COMMS_OVS:
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
+ rxq->rxdid = ICE_RXDID_COMMS_OVS;
break;
default:
/* update this according to the RXDID for PROTO_XTR_NONE */
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
+ rxq->rxdid = ICE_RXDID_COMMS_OVS;
break;
}
@@ -1622,7 +1633,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
- rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
+ rxd_to_pkt_fields[rxq->rxdid](rxq, mb, &rxdp[j]);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
ts_ns = ice_tstamp_convert_32b_64b(hw,
@@ -1939,7 +1950,7 @@ ice_recv_scattered_pkts(void *rx_queue,
first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(first_seg, &rxd);
- rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
+ rxd_to_pkt_fields[rxq->rxdid](rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
@@ -2370,7 +2381,7 @@ ice_recv_pkts(void *rx_queue,
rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(rxm, &rxd);
- rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
+ rxd_to_pkt_fields[rxq->rxdid](rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index e1c644fb63..146dc1f95d 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -89,7 +89,7 @@ struct ice_rx_queue {
bool rx_deferred_start; /* don't start this queue in dev start */
uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
- ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
+ uint32_t rxdid; /* Receive Flex Descriptor profile ID */
ice_rx_release_mbufs_t rx_rel_mbufs;
uint64_t offloads;
uint32_t time_high;
--
2.27.0
* Re: [dpdk-dev] [dpdk-stable] [PATCH] net/ice: fix function pointer in multi-process
2021-10-21 3:35 [dpdk-dev] [PATCH] net/ice: fix function pointer in multi-process dapengx.yu
@ 2021-10-22 7:55 ` Wang, Haiyue
2021-10-22 8:57 ` [dpdk-dev] [PATCH v2] " dapengx.yu
1 sibling, 0 replies; 12+ messages in thread
From: Wang, Haiyue @ 2021-10-22 7:55 UTC (permalink / raw)
To: Yu, DapengX, Yang, Qiming, Zhang, Qi Z; +Cc: dev, Yu, DapengX, stable
> -----Original Message-----
> From: stable <stable-bounces@dpdk.org> On Behalf Of dapengx.yu@intel.com
> Sent: Thursday, October 21, 2021 11:35
> To: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Yu, DapengX <dapengx.yu@intel.com>; stable@dpdk.org
> Subject: [dpdk-stable] [PATCH] net/ice: fix function pointer in multi-process
>
> From: Dapeng Yu <dapengx.yu@intel.com>
>
> The sharing of function pointer may cause crash of secondary process.
> This patch fixes it.
>
> Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
> Cc: stable@dpdk.org
>
> Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
> ---
> drivers/net/ice/ice_rxtx.c | 35 +++++++++++++++++++++++------------
> drivers/net/ice/ice_rxtx.h | 2 +-
> 2 files changed, 24 insertions(+), 13 deletions(-)
>
Basically LGTM. Just some comments.
> diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
> index ff362c21d9..7bb0ac4de3 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -205,51 +205,62 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
> #endif
> }
>
> +static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields[] = {
Let's rename it to "rxd_to_pkt_fields_ops"; the original name looks like a variable. ;-)
> + [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
> + [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
> + [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
> + [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
> + [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
> + [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
> + [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
> + [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
> +};
> +
> void
> ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
> {
Only use:
rxq->rxdid = rxdid;
Then remove all case ICE_RXDID_xxx, just keep the default:
rxq->rxdid = ICE_RXDID_COMMS_OVS;
> switch (rxdid) {
> case ICE_RXDID_COMMS_AUX_VLAN:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> + rxq->rxdid = ICE_RXDID_COMMS_AUX_VLAN;
> break;
>
> case ICE_RXDID_COMMS_AUX_IPV4:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> + rxq->rxdid = ICE_RXDID_COMMS_AUX_IPV4;
> break;
>
> case ICE_RXDID_COMMS_AUX_IPV6:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> + rxq->rxdid = ICE_RXDID_COMMS_AUX_IPV6;
> break;
>
> case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> + rxq->rxdid = ICE_RXDID_COMMS_AUX_IPV6_FLOW;
> break;
>
> case ICE_RXDID_COMMS_AUX_TCP:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> + rxq->rxdid = ICE_RXDID_COMMS_AUX_TCP;
> break;
>
> case ICE_RXDID_COMMS_AUX_IP_OFFSET:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
> + rxq->rxdid = ICE_RXDID_COMMS_AUX_IP_OFFSET;
> break;
>
> case ICE_RXDID_COMMS_GENERIC:
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
> + rxq->rxdid = ICE_RXDID_COMMS_GENERIC;
> break;
>
> case ICE_RXDID_COMMS_OVS:
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
> + rxq->rxdid = ICE_RXDID_COMMS_OVS;
> break;
>
The 'ICE_RXDID_COMMS_GENERIC' and 'ICE_RXDID_COMMS_OVS' cases can be removed.
> default:
> /* update this according to the RXDID for PROTO_XTR_NONE */
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
> + rxq->rxdid = ICE_RXDID_COMMS_OVS;
> break;
> }
>
> @@ -1622,7 +1633,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
> mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
> ice_rxd_to_vlan_tci(mb, &rxdp[j]);
> - rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
> + rxd_to_pkt_fields[rxq->rxdid](rxq, mb, &rxdp[j]);
> #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
> ts_ns = ice_tstamp_convert_32b_64b(hw,
> @@ -1939,7 +1950,7 @@ ice_recv_scattered_pkts(void *rx_queue,
> first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> ice_rxd_to_vlan_tci(first_seg, &rxd);
> - rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
> + rxd_to_pkt_fields[rxq->rxdid](rxq, first_seg, &rxd);
> pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
> #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
> @@ -2370,7 +2381,7 @@ ice_recv_pkts(void *rx_queue,
> rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> ice_rxd_to_vlan_tci(rxm, &rxd);
> - rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
> + rxd_to_pkt_fields[rxq->rxdid](rxq, rxm, &rxd);
> pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
> #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
> diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
> index e1c644fb63..146dc1f95d 100644
> --- a/drivers/net/ice/ice_rxtx.h
> +++ b/drivers/net/ice/ice_rxtx.h
> @@ -89,7 +89,7 @@ struct ice_rx_queue {
> bool rx_deferred_start; /* don't start this queue in dev start */
> uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
> uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
> - ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
> + uint32_t rxdid; /* Receive Flex Descriptor profile ID */
> ice_rx_release_mbufs_t rx_rel_mbufs;
> uint64_t offloads;
> uint32_t time_high;
> --
> 2.27.0
* [dpdk-dev] [PATCH v2] net/ice: fix function pointer in multi-process
2021-10-21 3:35 [dpdk-dev] [PATCH] net/ice: fix function pointer in multi-process dapengx.yu
2021-10-22 7:55 ` [dpdk-dev] [dpdk-stable] " Wang, Haiyue
@ 2021-10-22 8:57 ` dapengx.yu
2021-10-22 11:08 ` Wang, Haiyue
2021-10-25 3:40 ` [dpdk-dev] [PATCH v3] " dapengx.yu
1 sibling, 2 replies; 12+ messages in thread
From: dapengx.yu @ 2021-10-22 8:57 UTC (permalink / raw)
To: Qiming Yang, Qi Zhang; +Cc: dev, haiyue.wang, Dapeng Yu, stable
From: Dapeng Yu <dapengx.yu@intel.com>
Sharing a function pointer between the primary and secondary processes may
cause the secondary process to crash.
This patch removes the shared function pointer "rxd_to_pkt_fields" from
"struct ice_rx_queue", whose instances are shared between the primary and
secondary processes, and replaces it with an index into a per-process
function pointer array.
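In outline, the pattern this patch adopts looks roughly like the sketch
below (simplified, with illustrative names; the real table and handlers
are in the diff that follows): each process has its own constant handler
table in its private address space, and only the integer profile ID is
kept in the shared queue structure.

#include <stdint.h>

/* Sketch of index-based dispatch across processes. Only the integer
 * index is shared; the const table is resolved locally in each process. */
typedef void (*rxd_handler_t)(void);

static void handle_by_comms_ovs(void)    { /* ... */ }
static void handle_by_comms_aux_v1(void) { /* ... */ }

static const rxd_handler_t handler_ops[] = {
	[0] = handle_by_comms_ovs,
	[1] = handle_by_comms_aux_v1,
};

struct shared_rxq {
	uint32_t rxdid;                 /* plain index, safe to share */
};

/* Either process: look up the handler in its own copy of the table. */
void rx_path(struct shared_rxq *rxq)
{
	handler_ops[rxq->rxdid]();
}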
Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
Cc: stable@dpdk.org
Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
---
V2:
* Remove redundant code
---
drivers/net/ice/ice_rxtx.c | 35 +++++++++++++++++------------------
drivers/net/ice/ice_rxtx.h | 2 +-
2 files changed, 18 insertions(+), 19 deletions(-)
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index ff362c21d9..667eae9f6d 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -205,51 +205,50 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
#endif
}
+static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
+ [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
+ [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
+ [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
+};
+
void
ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
{
+ rxq->rxdid = rxdid;
+
switch (rxdid) {
case ICE_RXDID_COMMS_AUX_VLAN:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IPV4:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IPV6:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_TCP:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IP_OFFSET:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
- break;
-
- case ICE_RXDID_COMMS_GENERIC:
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
- break;
-
- case ICE_RXDID_COMMS_OVS:
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
break;
default:
/* update this according to the RXDID for PROTO_XTR_NONE */
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
+ rxq->rxdid = ICE_RXDID_COMMS_OVS;
break;
}
@@ -1622,7 +1621,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
- rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
ts_ns = ice_tstamp_convert_32b_64b(hw,
@@ -1939,7 +1938,7 @@ ice_recv_scattered_pkts(void *rx_queue,
first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(first_seg, &rxd);
- rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
@@ -2370,7 +2369,7 @@ ice_recv_pkts(void *rx_queue,
rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(rxm, &rxd);
- rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index e1c644fb63..146dc1f95d 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -89,7 +89,7 @@ struct ice_rx_queue {
bool rx_deferred_start; /* don't start this queue in dev start */
uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
- ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
+ uint32_t rxdid; /* Receive Flex Descriptor profile ID */
ice_rx_release_mbufs_t rx_rel_mbufs;
uint64_t offloads;
uint32_t time_high;
--
2.27.0
* Re: [dpdk-dev] [PATCH v2] net/ice: fix function pointer in multi-process
2021-10-22 8:57 ` [dpdk-dev] [PATCH v2] " dapengx.yu
@ 2021-10-22 11:08 ` Wang, Haiyue
2021-10-25 3:40 ` [dpdk-dev] [PATCH v3] " dapengx.yu
1 sibling, 0 replies; 12+ messages in thread
From: Wang, Haiyue @ 2021-10-22 11:08 UTC (permalink / raw)
To: Yu, DapengX, Yang, Qiming, Zhang, Qi Z; +Cc: dev, stable
> -----Original Message-----
> From: Yu, DapengX <dapengx.yu@intel.com>
> Sent: Friday, October 22, 2021 16:58
> To: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Yu, DapengX <dapengx.yu@intel.com>;
> stable@dpdk.org
> Subject: [PATCH v2] net/ice: fix function pointer in multi-process
>
> From: Dapeng Yu <dapengx.yu@intel.com>
>
> The sharing of function pointer may cause crash of secondary process.
>
> This patch removes the shared function pointer: "rxd_to_pkt_fields" in
> the instance of "struct ice_rx_queue" which is shared between primary
> and secondary process, and uses an index of function pointer array to
> replace it.
I think we can simplify it to something like:
Save the selected Receive Flex Descriptor profile ID and use it as an index
to call the handler function, instead of assigning a function pointer.
Otherwise the secondary process will run with a wrong function address
taken from the primary process.
>
> Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
> Cc: stable@dpdk.org
>
> Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
> ---
> V2:
> * Remove redundant code
> ---
> drivers/net/ice/ice_rxtx.c | 35 +++++++++++++++++------------------
> drivers/net/ice/ice_rxtx.h | 2 +-
> 2 files changed, 18 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
> index ff362c21d9..667eae9f6d 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -205,51 +205,50 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
> #endif
> }
>
> +static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
> + [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
> + [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
> + [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
> + [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
> + [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
> + [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
> + [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
> + [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
> +};
> +
> void
> ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
> {
> + rxq->rxdid = rxdid;
> +
> switch (rxdid) {
> case ICE_RXDID_COMMS_AUX_VLAN:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> break;
>
> case ICE_RXDID_COMMS_AUX_IPV4:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> break;
>
> case ICE_RXDID_COMMS_AUX_IPV6:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> break;
>
> case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> break;
>
> case ICE_RXDID_COMMS_AUX_TCP:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> break;
>
> case ICE_RXDID_COMMS_AUX_IP_OFFSET:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
> - break;
> -
> - case ICE_RXDID_COMMS_GENERIC:
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
> - break;
> -
> - case ICE_RXDID_COMMS_OVS:
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
> break;
My fault, we need to keep the above cases explicitly; otherwise
ICE_RXDID_COMMS_GENERIC would fall through to the default branch and rxdid
would be overwritten with ICE_RXDID_COMMS_OVS. ;-)
case ICE_RXDID_COMMS_GENERIC:
/* fallthrough */
case ICE_RXDID_COMMS_OVS:
break;
>
> default:
> /* update this according to the RXDID for PROTO_XTR_NONE */
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
> + rxq->rxdid = ICE_RXDID_COMMS_OVS;
> break;
> }
>
> @@ -1622,7 +1621,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
> mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
> ice_rxd_to_vlan_tci(mb, &rxdp[j]);
> - rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
> + rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
> #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
> ts_ns = ice_tstamp_convert_32b_64b(hw,
> @@ -1939,7 +1938,7 @@ ice_recv_scattered_pkts(void *rx_queue,
> first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> ice_rxd_to_vlan_tci(first_seg, &rxd);
> - rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
> + rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
> pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
> #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
> @@ -2370,7 +2369,7 @@ ice_recv_pkts(void *rx_queue,
> rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> ice_rxd_to_vlan_tci(rxm, &rxd);
> - rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
> + rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
> pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
> #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
> diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
> index e1c644fb63..146dc1f95d 100644
> --- a/drivers/net/ice/ice_rxtx.h
> +++ b/drivers/net/ice/ice_rxtx.h
> @@ -89,7 +89,7 @@ struct ice_rx_queue {
> bool rx_deferred_start; /* don't start this queue in dev start */
> uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
> uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
> - ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
> + uint32_t rxdid; /* Receive Flex Descriptor profile ID */
> ice_rx_release_mbufs_t rx_rel_mbufs;
> uint64_t offloads;
> uint32_t time_high;
> --
> 2.27.0
* [dpdk-dev] [PATCH v3] net/ice: fix function pointer in multi-process
2021-10-22 8:57 ` [dpdk-dev] [PATCH v2] " dapengx.yu
2021-10-22 11:08 ` Wang, Haiyue
@ 2021-10-25 3:40 ` dapengx.yu
2021-10-25 5:40 ` [dpdk-dev] [PATCH v4] " dapengx.yu
1 sibling, 1 reply; 12+ messages in thread
From: dapengx.yu @ 2021-10-25 3:40 UTC (permalink / raw)
To: Qiming Yang, Qi Zhang; +Cc: dev, haiyue.wang, Dapeng Yu, stable
From: Dapeng Yu <dapengx.yu@intel.com>
This patch saves the selected Receive Flex Descriptor profile ID and uses
it as an index to call the handler function, instead of assigning a
function pointer.
Otherwise the secondary process will run with a wrong function address
taken from the primary process.
Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
Cc: stable@dpdk.org
Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
---
V2:
* Remove redundant code
V3:
* Fix incorrect switch statements in V2
---
drivers/net/ice/ice_rxtx.c | 32 ++++++++++++++++++--------------
drivers/net/ice/ice_rxtx.h | 2 +-
2 files changed, 19 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index ff362c21d9..5e8a7d1eee 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -205,51 +205,55 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
#endif
}
+static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
+ [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
+ [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
+ [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
+};
+
void
ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
{
+ rxq->rxdid = rxdid;
+
switch (rxdid) {
case ICE_RXDID_COMMS_AUX_VLAN:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IPV4:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IPV6:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_TCP:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IP_OFFSET:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
break;
case ICE_RXDID_COMMS_GENERIC:
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
break;
case ICE_RXDID_COMMS_OVS:
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
- break;
-
+ /* fallthrough */
default:
/* update this according to the RXDID for PROTO_XTR_NONE */
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
+ rxq->rxdid = ICE_RXDID_COMMS_OVS;
break;
}
@@ -1622,7 +1626,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
- rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
ts_ns = ice_tstamp_convert_32b_64b(hw,
@@ -1939,7 +1943,7 @@ ice_recv_scattered_pkts(void *rx_queue,
first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(first_seg, &rxd);
- rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
@@ -2370,7 +2374,7 @@ ice_recv_pkts(void *rx_queue,
rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(rxm, &rxd);
- rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index e1c644fb63..146dc1f95d 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -89,7 +89,7 @@ struct ice_rx_queue {
bool rx_deferred_start; /* don't start this queue in dev start */
uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
- ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
+ uint32_t rxdid; /* Receive Flex Descriptor profile ID */
ice_rx_release_mbufs_t rx_rel_mbufs;
uint64_t offloads;
uint32_t time_high;
--
2.27.0
* [dpdk-dev] [PATCH v4] net/ice: fix function pointer in multi-process
2021-10-25 3:40 ` [dpdk-dev] [PATCH v3] " dapengx.yu
@ 2021-10-25 5:40 ` dapengx.yu
2021-10-25 5:47 ` Wang, Haiyue
2021-10-26 1:55 ` [dpdk-dev] [PATCH v5] " dapengx.yu
0 siblings, 2 replies; 12+ messages in thread
From: dapengx.yu @ 2021-10-25 5:40 UTC (permalink / raw)
To: Qiming Yang, Qi Zhang; +Cc: dev, haiyue.wang, Dapeng Yu, stable
From: Dapeng Yu <dapengx.yu@intel.com>
This patch saves the selected Receive Flex Descriptor profile ID and uses
it as an index to call the handler function, instead of assigning a
function pointer.
Otherwise the secondary process will run with a wrong function address
taken from the primary process.
Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
Cc: stable@dpdk.org
Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
---
V2:
* Remove redundant code
V3:
* Fix incorrect switch statements in V2
V4:
* Combine similar case statements
---
drivers/net/ice/ice_rxtx.c | 32 ++++++++++++++++++--------------
drivers/net/ice/ice_rxtx.h | 2 +-
2 files changed, 19 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index ff362c21d9..473cf1c813 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -205,51 +205,55 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
#endif
}
+static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
+ [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
+ [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
+ [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
+};
+
void
ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
{
+ rxq->rxdid = rxdid;
+
switch (rxdid) {
case ICE_RXDID_COMMS_AUX_VLAN:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IPV4:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IPV6:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_TCP:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IP_OFFSET:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
break;
case ICE_RXDID_COMMS_GENERIC:
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
- break;
-
+ /* fallthrough */
case ICE_RXDID_COMMS_OVS:
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
break;
default:
/* update this according to the RXDID for PROTO_XTR_NONE */
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
+ rxq->rxdid = ICE_RXDID_COMMS_OVS;
break;
}
@@ -1622,7 +1626,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
- rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
ts_ns = ice_tstamp_convert_32b_64b(hw,
@@ -1939,7 +1943,7 @@ ice_recv_scattered_pkts(void *rx_queue,
first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(first_seg, &rxd);
- rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
@@ -2370,7 +2374,7 @@ ice_recv_pkts(void *rx_queue,
rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(rxm, &rxd);
- rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index e1c644fb63..146dc1f95d 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -89,7 +89,7 @@ struct ice_rx_queue {
bool rx_deferred_start; /* don't start this queue in dev start */
uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
- ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
+ uint32_t rxdid; /* Receive Flex Descriptor profile ID */
ice_rx_release_mbufs_t rx_rel_mbufs;
uint64_t offloads;
uint32_t time_high;
--
2.27.0
* Re: [dpdk-dev] [PATCH v4] net/ice: fix function pointer in multi-process
2021-10-25 5:40 ` [dpdk-dev] [PATCH v4] " dapengx.yu
@ 2021-10-25 5:47 ` Wang, Haiyue
2021-10-26 1:55 ` [dpdk-dev] [PATCH v5] " dapengx.yu
1 sibling, 0 replies; 12+ messages in thread
From: Wang, Haiyue @ 2021-10-25 5:47 UTC (permalink / raw)
To: Yu, DapengX, Yang, Qiming, Zhang, Qi Z; +Cc: dev, stable
> -----Original Message-----
> From: Yu, DapengX <dapengx.yu@intel.com>
> Sent: Monday, October 25, 2021 13:40
> To: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Yu, DapengX <dapengx.yu@intel.com>;
> stable@dpdk.org
> Subject: [PATCH v4] net/ice: fix function pointer in multi-process
>
> From: Dapeng Yu <dapengx.yu@intel.com>
>
> This patch uses the index value to call the function, instead of the
> function pointer assignment to save the selection of Receive Flex
> Descriptor profile ID.
>
> Otherwise the secondary process will run with wrong function address
> from primary process.
>
> Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
> Cc: stable@dpdk.org
>
> Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
> ---
> V2:
> * Remove redundant code
> V3:
> * Fix incorrect switch statements in V2
> V4:
> * Combine similar case statements
> ---
> drivers/net/ice/ice_rxtx.c | 32 ++++++++++++++++++--------------
> drivers/net/ice/ice_rxtx.h | 2 +-
> 2 files changed, 19 insertions(+), 15 deletions(-)
>
Acked-by: Haiyue Wang <haiyue.wang@intel.com>
> --
> 2.27.0
* [dpdk-dev] [PATCH v5] net/ice: fix function pointer in multi-process
2021-10-25 5:40 ` [dpdk-dev] [PATCH v4] " dapengx.yu
2021-10-25 5:47 ` Wang, Haiyue
@ 2021-10-26 1:55 ` dapengx.yu
2021-10-27 3:28 ` Zhang, Qi Z
1 sibling, 1 reply; 12+ messages in thread
From: dapengx.yu @ 2021-10-26 1:55 UTC (permalink / raw)
To: Qiming Yang, Qi Zhang; +Cc: dev, haiyue.wang, Dapeng Yu, stable
From: Dapeng Yu <dapengx.yu@intel.com>
This patch saves the selected Receive Flex Descriptor profile ID and uses
it as an index to call the handler function, instead of assigning a
function pointer.
Otherwise the secondary process will run with a wrong function address
taken from the primary process.
Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
Cc: stable@dpdk.org
Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
Acked-by: Haiyue Wang <haiyue.wang@intel.com>
---
V2:
* Remove redundant code
V3:
* Fix incorrect switch statements in V2
V4:
* Combine similar case statements
V5:
* Rebase
---
drivers/net/ice/ice_rxtx.c | 32 ++++++++++++++++++--------------
drivers/net/ice/ice_rxtx.h | 2 +-
2 files changed, 19 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index e7217661dd..c3cad2fbbb 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -204,51 +204,55 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
#endif
}
+static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
+ [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
+ [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
+ [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
+};
+
void
ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
{
+ rxq->rxdid = rxdid;
+
switch (rxdid) {
case ICE_RXDID_COMMS_AUX_VLAN:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IPV4:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IPV6:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_TCP:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IP_OFFSET:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
break;
case ICE_RXDID_COMMS_GENERIC:
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
- break;
-
+ /* fallthrough */
case ICE_RXDID_COMMS_OVS:
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
break;
default:
/* update this according to the RXDID for PROTO_XTR_NONE */
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
+ rxq->rxdid = ICE_RXDID_COMMS_OVS;
break;
}
@@ -1608,7 +1612,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
- rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
ts_ns = ice_tstamp_convert_32b_64b(hw,
@@ -1925,7 +1929,7 @@ ice_recv_scattered_pkts(void *rx_queue,
first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(first_seg, &rxd);
- rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
@@ -2356,7 +2360,7 @@ ice_recv_pkts(void *rx_queue,
rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(rxm, &rxd);
- rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index e1c644fb63..146dc1f95d 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -89,7 +89,7 @@ struct ice_rx_queue {
bool rx_deferred_start; /* don't start this queue in dev start */
uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
- ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
+ uint32_t rxdid; /* Receive Flex Descriptor profile ID */
ice_rx_release_mbufs_t rx_rel_mbufs;
uint64_t offloads;
uint32_t time_high;
--
2.27.0
* Re: [dpdk-dev] [PATCH v5] net/ice: fix function pointer in multi-process
2021-10-26 1:55 ` [dpdk-dev] [PATCH v5] " dapengx.yu
@ 2021-10-27 3:28 ` Zhang, Qi Z
2022-02-24 18:00 ` Navin Srinivas
0 siblings, 1 reply; 12+ messages in thread
From: Zhang, Qi Z @ 2021-10-27 3:28 UTC (permalink / raw)
To: Yu, DapengX, Yang, Qiming; +Cc: dev, Wang, Haiyue, stable
> -----Original Message-----
> From: Yu, DapengX <dapengx.yu@intel.com>
> Sent: Tuesday, October 26, 2021 9:56 AM
> To: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Yu, DapengX
> <dapengx.yu@intel.com>; stable@dpdk.org
> Subject: [PATCH v5] net/ice: fix function pointer in multi-process
>
> From: Dapeng Yu <dapengx.yu@intel.com>
>
> This patch uses the index value to call the function, instead of the function
> pointer assignment to save the selection of Receive Flex Descriptor profile ID.
>
> Otherwise the secondary process will run with wrong function address from
> primary process.
>
> Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
> Cc: stable@dpdk.org
>
> Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
> Acked-by: Haiyue Wang <haiyue.wang@intel.com>
Applied to dpdk-next-net-intel.
Thanks
Qi
* Re: [dpdk-dev] [PATCH v5] net/ice: fix function pointer in multi-process
2021-10-27 3:28 ` Zhang, Qi Z
@ 2022-02-24 18:00 ` Navin Srinivas
2022-02-25 1:34 ` Zhang, Qi Z
0 siblings, 1 reply; 12+ messages in thread
From: Navin Srinivas @ 2022-02-24 18:00 UTC (permalink / raw)
To: Zhang, Qi Z; +Cc: Yu, DapengX, Yang, Qiming, dev, Wang, Haiyue, stable
Hi,
Is this fix also applicable to the VF driver? I do not see this change
ported to the VF in DPDK 20.11.4.
Thanks,
Navin Srinivas
On Wed, Oct 27, 2021 at 8:58 AM Zhang, Qi Z <qi.z.zhang@intel.com> wrote:
>
>
> > -----Original Message-----
> > From: Yu, DapengX <dapengx.yu@intel.com>
> > Sent: Tuesday, October 26, 2021 9:56 AM
> > To: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> > <qi.z.zhang@intel.com>
> > Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Yu, DapengX
> > <dapengx.yu@intel.com>; stable@dpdk.org
> > Subject: [PATCH v5] net/ice: fix function pointer in multi-process
> >
> > From: Dapeng Yu <dapengx.yu@intel.com>
> >
> > This patch uses the index value to call the function, instead of the
> function
> > pointer assignment to save the selection of Receive Flex Descriptor
> profile ID.
> >
> > Otherwise the secondary process will run with wrong function address from
> > primary process.
> >
> > Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
> > Acked-by: Haiyue Wang <haiyue.wang@intel.com>
>
> Applied to dpdk-next-net-intel.
>
> Thanks
> Qi
>
* RE: [dpdk-dev] [PATCH v5] net/ice: fix function pointer in multi-process
2022-02-24 18:00 ` Navin Srinivas
@ 2022-02-25 1:34 ` Zhang, Qi Z
2022-02-25 10:24 ` Navin Srinivas
0 siblings, 1 reply; 12+ messages in thread
From: Zhang, Qi Z @ 2022-02-25 1:34 UTC (permalink / raw)
To: Navin Srinivas; +Cc: Yu, DapengX, Yang, Qiming, dev, Wang, Haiyue, stable
We need a separate fix for AVF, which is ongoing.
And DPDK 20.11.4 LTS by default only backports fixes made during the DPDK 21.11 dev cycle, so you may need to wait for 20.11.5 or cherry-pick it yourself.
Regards
Qi
From: Navin Srinivas <g.navinsrinivas@gmail.com>
Sent: Friday, February 25, 2022 2:00 AM
To: Zhang, Qi Z <qi.z.zhang@intel.com>
Cc: Yu, DapengX <dapengx.yu@intel.com>; Yang, Qiming <qiming.yang@intel.com>; dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; stable@dpdk.org
Subject: Re: [dpdk-dev] [PATCH v5] net/ice: fix function pointer in multi-process
Hi,
Is this fix also applicable to the VF driver? I do not see this change ported to the VF in DPDK 20.11.4.
Thanks,
Navin Srinivas
On Wed, Oct 27, 2021 at 8:58 AM Zhang, Qi Z <qi.z.zhang@intel.com> wrote:
> -----Original Message-----
> From: Yu, DapengX <dapengx.yu@intel.com>
> Sent: Tuesday, October 26, 2021 9:56 AM
> To: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Yu, DapengX
> <dapengx.yu@intel.com>; stable@dpdk.org
> Subject: [PATCH v5] net/ice: fix function pointer in multi-process
>
> From: Dapeng Yu <dapengx.yu@intel.com>
>
> This patch uses the index value to call the function, instead of the function
> pointer assignment to save the selection of Receive Flex Descriptor profile ID.
>
> Otherwise the secondary process will run with wrong function address from
> primary process.
>
> Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
> Cc: stable@dpdk.org
>
> Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
> Acked-by: Haiyue Wang <haiyue.wang@intel.com>
Applied to dpdk-next-net-intel.
Thanks
Qi
* Re: [dpdk-dev] [PATCH v5] net/ice: fix function pointer in multi-process
2022-02-25 1:34 ` Zhang, Qi Z
@ 2022-02-25 10:24 ` Navin Srinivas
0 siblings, 0 replies; 12+ messages in thread
From: Navin Srinivas @ 2022-02-25 10:24 UTC (permalink / raw)
To: Zhang, Qi Z; +Cc: Yu, DapengX, Yang, Qiming, dev, Wang, Haiyue, stable
Hi Qi,
Thank you for your reply and confirmation.
Regards,
Navin Srinivas
On Fri, Feb 25, 2022 at 7:04 AM Zhang, Qi Z <qi.z.zhang@intel.com> wrote:
> We need a separate fix for AVF which is ongoing.
>
> And DPDK 20.11.4 LTS by default will only backport all the fix that during
> DPDK 21.11 dev cycle, so you may need to wait for 20.11.5 or just cherry
> pick by yourself.
>
>
>
> Regards
>
> Qi
>
>
>
>
>
> From: Navin Srinivas <g.navinsrinivas@gmail.com>
> Sent: Friday, February 25, 2022 2:00 AM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: Yu, DapengX <dapengx.yu@intel.com>; Yang, Qiming <
> qiming.yang@intel.com>; dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>;
> stable@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v5] net/ice: fix function pointer in
> multi-process
>
>
>
> Hi,
>
>
>
> Whether this fix is applicable for VF? I do not see this change ported to
> VF in DPDK-20.11.4.
>
>
>
> Thanks,
>
> Navin Srinivas
>
>
>
> On Wed, Oct 27, 2021 at 8:58 AM Zhang, Qi Z <qi.z.zhang@intel.com> wrote:
>
>
>
> > -----Original Message-----
> > From: Yu, DapengX <dapengx.yu@intel.com>
> > Sent: Tuesday, October 26, 2021 9:56 AM
> > To: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> > <qi.z.zhang@intel.com>
> > Cc: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Yu, DapengX
> > <dapengx.yu@intel.com>; stable@dpdk.org
> > Subject: [PATCH v5] net/ice: fix function pointer in multi-process
> >
> > From: Dapeng Yu <dapengx.yu@intel.com>
> >
> > This patch uses the index value to call the function, instead of the
> function
> > pointer assignment to save the selection of Receive Flex Descriptor
> profile ID.
> >
> > Otherwise the secondary process will run with wrong function address from
> > primary process.
> >
> > Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
> > Acked-by: Haiyue Wang <haiyue.wang@intel.com>
>
> Applied to dpdk-next-net-intel.
>
> Thanks
> Qi
>
>