From: dapengx.yu@intel.com
To: Qiming Yang <qiming.yang@intel.com>, Qi Zhang <qi.z.zhang@intel.com>
Cc: dev@dpdk.org, haiyue.wang@intel.com,
Dapeng Yu <dapengx.yu@intel.com>,
stable@dpdk.org
Subject: [dpdk-stable] [PATCH v5] net/ice: fix function pointer in multi-process
Date: Tue, 26 Oct 2021 09:55:42 +0800 [thread overview]
Message-ID: <20211026015542.1416819-1-dapengx.yu@intel.com> (raw)
In-Reply-To: <20211025054029.1140977-1-dapengx.yu@intel.com>
From: Dapeng Yu <dapengx.yu@intel.com>
Instead of storing a function pointer to record which Receive Flex
Descriptor profile is selected, store the profile ID itself and use it
as an index into a constant handler table when calling the function.
Otherwise the secondary process would call a function address that is
only valid in the primary process's address space.
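For illustration only (not part of the patch): a minimal C sketch of why
a stored function pointer breaks in a secondary process while an index
into a per-process constant table does not. Names such as handler_a,
handler_b and struct shared_queue are hypothetical, not driver code.

	#include <stdint.h>
	#include <stdio.h>

	typedef void (*handler_t)(void);

	static void handler_a(void) { puts("handler_a"); }
	static void handler_b(void) { puts("handler_b"); }

	/* Per-process constant table: built independently in every
	 * process, so each entry always holds an address that is valid
	 * in the current address space. */
	static const handler_t handlers[] = { handler_a, handler_b };

	struct shared_queue {
		/* Broken across processes: an address taken in the
		 * primary process may point anywhere in the secondary. */
		handler_t fn;
		/* Safe across processes: a plain index means the same
		 * thing everywhere; each process resolves it through
		 * its own table. */
		uint32_t handler_id;
	};

	int main(void)
	{
		struct shared_queue q = { .fn = handler_b, .handler_id = 1 };

		/* What the fixed driver does: dispatch by index. */
		handlers[q.handler_id]();
		return 0;
	}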
Fixes: 7a340b0b4e03 ("net/ice: refactor Rx FlexiMD handling")
Cc: stable@dpdk.org
Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
Acked-by: Haiyue Wang <haiyue.wang@intel.com>
---
V2:
* Remove redundant code
V3:
* Fix incorrect switch statements in V2
V4:
* Combine similar case statements
V5:
* Rebase
---
drivers/net/ice/ice_rxtx.c | 32 ++++++++++++++++++--------------
drivers/net/ice/ice_rxtx.h | 2 +-
2 files changed, 19 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index e7217661dd..c3cad2fbbb 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -204,51 +204,55 @@ ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
#endif
}
+static const ice_rxd_to_pkt_fields_t rxd_to_pkt_fields_ops[] = {
+ [ICE_RXDID_COMMS_AUX_VLAN] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV4] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV6] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_TCP] = ice_rxd_to_pkt_fields_by_comms_aux_v1,
+ [ICE_RXDID_COMMS_AUX_IP_OFFSET] = ice_rxd_to_pkt_fields_by_comms_aux_v2,
+ [ICE_RXDID_COMMS_GENERIC] = ice_rxd_to_pkt_fields_by_comms_generic,
+ [ICE_RXDID_COMMS_OVS] = ice_rxd_to_pkt_fields_by_comms_ovs,
+};
+
void
ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
{
+ rxq->rxdid = rxdid;
+
switch (rxdid) {
case ICE_RXDID_COMMS_AUX_VLAN:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IPV4:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IPV6:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_TCP:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
break;
case ICE_RXDID_COMMS_AUX_IP_OFFSET:
rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
break;
case ICE_RXDID_COMMS_GENERIC:
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
- break;
-
+ /* fallthrough */
case ICE_RXDID_COMMS_OVS:
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
break;
default:
/* update this according to the RXDID for PROTO_XTR_NONE */
- rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
+ rxq->rxdid = ICE_RXDID_COMMS_OVS;
break;
}
@@ -1608,7 +1612,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
- rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
ts_ns = ice_tstamp_convert_32b_64b(hw,
@@ -1925,7 +1929,7 @@ ice_recv_scattered_pkts(void *rx_queue,
first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(first_seg, &rxd);
- rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
@@ -2356,7 +2360,7 @@ ice_recv_pkts(void *rx_queue,
rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(rxm, &rxd);
- rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
+ rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index e1c644fb63..146dc1f95d 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -89,7 +89,7 @@ struct ice_rx_queue {
bool rx_deferred_start; /* don't start this queue in dev start */
uint8_t proto_xtr; /* Protocol extraction from flexible descriptor */
uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
- ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
+ uint32_t rxdid; /* Receive Flex Descriptor profile ID */
ice_rx_release_mbufs_t rx_rel_mbufs;
uint64_t offloads;
uint32_t time_high;
--
2.27.0