From: "Wang, Haiyue" <haiyue.wang@intel.com>
To: "Yu, DapengX" <dapengx.yu@intel.com>,
"Yang, Qiming" <qiming.yang@intel.com>,
"Zhang, Qi Z" <qi.z.zhang@intel.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>,
"Yu, DapengX" <dapengx.yu@intel.com>,
"stable@dpdk.org" <stable@dpdk.org>
Subject: Re: [dpdk-dev] [dpdk-stable] [PATCH] net/ice: fix function pointer in multi-process
Date: Fri, 22 Oct 2021 07:55:47 +0000 [thread overview]
Message-ID: <DM4PR11MB55182531C0CC18E677273A3CF7809@DM4PR11MB5518.namprd11.prod.outlook.com> (raw)
In-Reply-To: <20211021033527.177448-1-dapengx.yu@intel.com>
> -----Original Message-----
> From: stable <stable-bounces@dpdk.org> On Behalf Of dapengx.yu@intel.com
> Sent: Thursday, October 21, 2021 11:35
> To: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org; Yu, DapengX <dapengx.yu@intel.com>; stable@dpdk.org
> Subject: [dpdk-stable] [PATCH] net/ice: fix function pointer in multi-process
>
> From: Dapeng Yu <dapengx.yu@intel.com>
>
> The sharing of function pointer may cause crash of secondary process.
> This patch fixes it.
>
> Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
> Cc: stable@dpdk.org
>
> Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
> ---
> drivers/net/ice/ice_rxtx.c | 35 +++++++++++++++++++++++------------
> drivers/net/ice/ice_rxtx.h | 2 +-
> 2 files changed, 24 insertions(+), 13 deletions(-)
>
Basically LGTM. Just some comments.
> diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
> index ff362c21d9..7bb0ac4de3 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -205,51 +205,62 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
> #endif
> }
>
> +static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields[] = {
Let's rename it to "rxd_to_pkt_fields_ops"; the original name looks like a variable. ;-)
> + [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
> + [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
> + [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
> + [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
> + [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
> + [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
> + [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
> + [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
> +};
> +
> void
> ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
> {
You can simply use:
rxq->rxdid = rxdid;
Then remove all the 'case ICE_RXDID_xxx' labels and keep just the default:
rxq->rxdid = ICE_RXDID_COMMS_OVS;
> switch (rxdid) {
> case ICE_RXDID_COMMS_AUX_VLAN:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> + rxq->rxdid = ICE_RXDID_COMMS_AUX_VLAN;
> break;
>
> case ICE_RXDID_COMMS_AUX_IPV4:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> + rxq->rxdid = ICE_RXDID_COMMS_AUX_IPV4;
> break;
>
> case ICE_RXDID_COMMS_AUX_IPV6:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> + rxq->rxdid = ICE_RXDID_COMMS_AUX_IPV6;
> break;
>
> case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> + rxq->rxdid = ICE_RXDID_COMMS_AUX_IPV6_FLOW;
> break;
>
> case ICE_RXDID_COMMS_AUX_TCP:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
> + rxq->rxdid = ICE_RXDID_COMMS_AUX_TCP;
> break;
>
> case ICE_RXDID_COMMS_AUX_IP_OFFSET:
> rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
> + rxq->rxdid = ICE_RXDID_COMMS_AUX_IP_OFFSET;
> break;
>
> case ICE_RXDID_COMMS_GENERIC:
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
> + rxq->rxdid = ICE_RXDID_COMMS_GENERIC;
> break;
>
> case ICE_RXDID_COMMS_OVS:
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
> + rxq->rxdid = ICE_RXDID_COMMS_OVS;
> break;
>
These cases 'ICE_RXDID_COMMS_GENERIC' and 'ICE_RXDID_COMMS_OVS' can be removed.
> default:
> /* update this according to the RXDID for PROTO_XTR_NONE */
> - rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
> + rxq->rxdid = ICE_RXDID_COMMS_OVS;
> break;
> }
>
> @@ -1622,7 +1633,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
> mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
> ice_rxd_to_vlan_tci(mb, &rxdp[j]);
> - rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
> + rxd_to_pkt_fields[rxq->rxdid](rxq, mb, &rxdp[j]);
> #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
> ts_ns = ice_tstamp_convert_32b_64b(hw,
> @@ -1939,7 +1950,7 @@ ice_recv_scattered_pkts(void *rx_queue,
> first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> ice_rxd_to_vlan_tci(first_seg, &rxd);
> - rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
> + rxd_to_pkt_fields[rxq->rxdid](rxq, first_seg, &rxd);
> pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
> #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
> @@ -2370,7 +2381,7 @@ ice_recv_pkts(void *rx_queue,
> rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
> rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
> ice_rxd_to_vlan_tci(rxm, &rxd);
> - rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
> + rxd_to_pkt_fields[rxq->rxdid](rxq, rxm, &rxd);
> pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
> #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
> if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
> diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
> index e1c644fb63..146dc1f95d 100644
> --- a/drivers/net/ice/ice_rxtx.h
> +++ b/drivers/net/ice/ice_rxtx.h
> @@ -89,7 +89,7 @@ struct ice_rx_queue {
> bool rx_deferred_start; /* don't start this queue in dev start */
> uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
> uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
> - ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
> + uint32_t rxdid; /* Receive Flex Descriptor profile ID */
> ice_rx_release_mbufs_t rx_rel_mbufs;
> uint64_t offloads;
> uint32_t time_high;
> --
> 2.27.0
next prev parent reply other threads:[~2021-10-22 7:55 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-10-21 3:35 [dpdk-dev] " dapengx.yu
2021-10-22 7:55 ` Wang, Haiyue [this message]
2021-10-22 8:57 ` [dpdk-dev] [PATCH v2] " dapengx.yu
2021-10-22 11:08 ` Wang, Haiyue
2021-10-25 3:40 ` [dpdk-dev] [PATCH v3] " dapengx.yu
2021-10-25 5:40 ` [dpdk-dev] [PATCH v4] " dapengx.yu
2021-10-25 5:47 ` Wang, Haiyue
2021-10-26 1:55 ` [dpdk-dev] [PATCH v5] " dapengx.yu
2021-10-27 3:28 ` Zhang, Qi Z
2022-02-24 18:00 ` Navin Srinivas
2022-02-25 1:34 ` Zhang, Qi Z
2022-02-25 10:24 ` Navin Srinivas
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=DM4PR11MB55182531C0CC18E677273A3CF7809@DM4PR11MB5518.namprd11.prod.outlook.com \
--to=haiyue.wang@intel.com \
--cc=dapengx.yu@intel.com \
--cc=dev@dpdk.org \
--cc=qi.z.zhang@intel.com \
--cc=qiming.yang@intel.com \
--cc=stable@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).