* [PATCH 20.11 v2] net/iavf: fix multi-process shared data
From: dapengx.yu @ 2021-11-11 5:48 UTC
To: stable; +Cc: Dapeng Yu
From: Dapeng Yu <dapengx.yu@intel.com>
[ upstream commit 435d523112ccef6cb58edba3062ea05a2f6544fc ]
When the iavf_adapter instance is not completely initialized in the
primary process and the secondary process accesses its "rte_eth_dev"
member, the secondary process crashes.
This patch replaces eth_dev with eth_dev_data in iavf_adapter.
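For background, a minimal sketch (not part of the patch; the struct and
function names are illustrative): struct rte_eth_dev lives in
per-process memory, while struct rte_eth_dev_data is allocated in
shared memory, so only the latter can safely be cached in the shared
device-private area:

    struct example_priv {
            /* BAD: only meaningful in the process that stored it;
             * dereferencing it in a secondary process crashes. */
            struct rte_eth_dev *eth_dev;
            /* OK: rte_eth_dev_data lives in shared memory, so this
             * pointer is valid in every process attached to the
             * device. */
            struct rte_eth_dev_data *dev_data;
    };

    static uint16_t
    example_nb_rxq(struct example_priv *priv)
    {
            /* Safe in both primary and secondary processes. */
            return priv->dev_data->nb_rx_queues;
    }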
Fixes: f978c1c9b3b5 ("net/iavf: add RSS hash parsing in AVX path")
Fixes: 9c9aa0040344 ("net/iavf: add offload path for Rx AVX512 flex descriptor")
Fixes: 63660ea3ee0b ("net/iavf: add RSS hash parsing in SSE path")
Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
---
V2:
* Simplify parameters of iavf_request_queues
* Clean commit log
---
drivers/net/iavf/iavf.h | 8 +++---
drivers/net/iavf/iavf_ethdev.c | 12 ++++----
drivers/net/iavf/iavf_fdir.c | 4 +--
drivers/net/iavf/iavf_rxtx.h | 4 +--
drivers/net/iavf/iavf_rxtx_vec_avx2.c | 8 ++++--
drivers/net/iavf/iavf_rxtx_vec_avx512.c | 8 ++++--
drivers/net/iavf/iavf_rxtx_vec_sse.c | 7 +++--
drivers/net/iavf/iavf_vchnl.c | 38 ++++++++++++-------------
8 files changed, 48 insertions(+), 41 deletions(-)
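For context on the V2 parameter simplification, a sketch of the new
call pattern (this restates the iavf_vchnl.c hunk below; the
IAVF_DEV_PRIVATE_TO_* macros are the driver's existing helpers):
iavf_request_queues() now takes the rte_eth_dev directly and recovers
the adapter from dev->data->dev_private, which resides in shared
memory and is therefore valid in any process:

    int
    iavf_request_queues(struct rte_eth_dev *dev, uint16_t num)
    {
            struct iavf_adapter *adapter =
                    IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
            struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

            /* ...unchanged body: fill a virtchnl_vf_res_request with
             * num and send VIRTCHNL_OP_REQUEST_QUEUES to the PF... */
    }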
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 3328bd9327..b44b4c5ee0 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -166,6 +166,8 @@ struct iavf_info {
struct iavf_fdir_info fdir; /* flow director info */
/* indicate large VF support enabled or not */
bool lv_enabled;
+
+ struct rte_eth_dev *eth_dev;
};
#define IAVF_MAX_PKT_TYPE 1024
@@ -194,7 +196,7 @@ struct iavf_devargs {
/* Structure to store private data for each VF instance. */
struct iavf_adapter {
struct iavf_hw hw;
- struct rte_eth_dev *eth_dev;
+ struct rte_eth_dev_data *dev_data;
struct iavf_info vf;
bool rx_bulk_alloc_allowed;
@@ -220,8 +222,6 @@ struct iavf_adapter {
(&(((struct iavf_vsi *)vsi)->adapter->hw))
#define IAVF_VSI_TO_VF(vsi) \
(&(((struct iavf_vsi *)vsi)->adapter->vf))
-#define IAVF_VSI_TO_ETH_DEV(vsi) \
- (((struct iavf_vsi *)vsi)->adapter->eth_dev)
static inline void
iavf_init_adminq_parameter(struct iavf_hw *hw)
@@ -325,6 +325,6 @@ int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
struct rte_ether_addr *mc_addrs,
uint32_t mc_addrs_num, bool add);
-int iavf_request_queues(struct iavf_adapter *adapter, uint16_t num);
+int iavf_request_queues(struct rte_eth_dev *dev, uint16_t num);
int iavf_get_max_rss_queue_region(struct iavf_adapter *adapter);
#endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index ed69ba483e..34877e9609 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -250,15 +250,15 @@ iavf_init_rss(struct iavf_adapter *adapter)
uint16_t i, j, nb_q;
int ret;
- rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
- nb_q = RTE_MIN(adapter->eth_dev->data->nb_rx_queues,
+ rss_conf = &adapter->dev_data->dev_conf.rx_adv_conf.rss_conf;
+ nb_q = RTE_MIN(adapter->dev_data->nb_rx_queues,
vf->max_rss_qregion);
if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
PMD_DRV_LOG(DEBUG, "RSS is not supported");
return -ENOTSUP;
}
- if (adapter->eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ if (adapter->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
/* set all lut items to default queue */
for (i = 0; i < vf->vf_res->rss_lut_size; i++)
@@ -306,7 +306,7 @@ iavf_queues_req_reset(struct rte_eth_dev *dev, uint16_t num)
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
int ret;
- ret = iavf_request_queues(ad, num);
+ ret = iavf_request_queues(dev, num);
if (ret) {
PMD_DRV_LOG(ERR, "request queues from PF failed");
return ret;
@@ -1791,6 +1791,8 @@ iavf_init_vf(struct rte_eth_dev *dev)
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ vf->eth_dev = dev;
+
err = iavf_parse_devargs(dev);
if (err) {
PMD_INIT_LOG(ERR, "Failed to parse devargs");
@@ -1985,7 +1987,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
hw->bus.func = pci_dev->addr.function;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->back = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
- adapter->eth_dev = eth_dev;
+ adapter->dev_data = eth_dev->data;
adapter->stopped = 1;
if (iavf_init_vf(eth_dev) != 0) {
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 253213f8b5..01a3fc3f98 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -287,7 +287,7 @@ iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
}
}
- if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
+ if (rss->queue[rss->queue_num - 1] >= ad->dev_data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, act,
"Invalid queue region indexes.");
@@ -367,7 +367,7 @@ iavf_fdir_parse_action(struct iavf_adapter *ad,
filter_action->act_conf.queue.index = act_q->index;
if (filter_action->act_conf.queue.index >=
- ad->eth_dev->data->nb_rx_queues) {
+ ad->dev_data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions, "Invalid queue for FDIR.");
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index d4b4935be6..7a2ed65d44 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -540,8 +540,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
#define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
int i; \
- for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
- struct iavf_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+ for (i = 0; i < (ad)->dev_data->nb_rx_queues; i++) { \
+ struct iavf_rx_queue *rxq = (ad)->dev_data->rx_queues[i]; \
if (!rxq) \
continue; \
rxq->fdir_enabled = on; \
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index 8f28afc8c5..233e3c445c 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -640,7 +640,10 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
{
#define IAVF_DESCS_PER_LOOP_AVX 8
- const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
+ struct iavf_adapter *adapter = rxq->vsi->adapter;
+
+ uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+ const uint32_t *type_table = adapter->ptype_tbl;
const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
0, rxq->mbuf_initializer);
@@ -996,8 +999,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
*/
- if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_RSS_HASH) {
+ if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh7 =
_mm_load_si128
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 584d12ea36..89aec1732e 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -638,7 +638,10 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts, uint8_t *split_packet)
{
- const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
+ struct iavf_adapter *adapter = rxq->vsi->adapter;
+
+ uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+ const uint32_t *type_table = adapter->ptype_tbl;
const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
rxq->mbuf_initializer);
@@ -1011,8 +1014,7 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
*/
- if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_RSS_HASH) {
+ if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh7 =
_mm_load_si128
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 75c77f9d32..6b2baf2a0b 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -644,7 +644,9 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
uint16_t nb_pkts_recd;
int pos;
uint64_t var;
- const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ struct iavf_adapter *adapter = rxq->vsi->adapter;
+ uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+ const uint32_t *ptype_tbl = adapter->ptype_tbl;
__m128i crc_adjust = _mm_set_epi16
(0, 0, 0, /* ignore non-length fields */
-rxq->crc_len, /* sub crc on data_len */
@@ -817,8 +819,7 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
*/
- if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_RSS_HASH) {
+ if (offloads & DEV_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh3 =
_mm_load_si128
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index c17ae06227..a50b643a34 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -71,7 +71,6 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
{
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
- struct rte_eth_dev *dev = adapter->eth_dev;
struct iavf_arq_event_info event;
enum iavf_aq_result result = IAVF_MSG_NON;
enum virtchnl_ops opcode;
@@ -113,7 +112,7 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
speed = vpe->event_data.link_event.link_speed;
vf->link_speed = iavf_convert_link_speed(speed);
}
- iavf_dev_link_update(dev, 0);
+ iavf_dev_link_update(vf->eth_dev, 0);
PMD_DRV_LOG(INFO, "Link status update:%s",
vf->link_up ? "up" : "down");
break;
@@ -532,8 +531,8 @@ iavf_enable_queues(struct iavf_adapter *adapter)
memset(&queue_select, 0, sizeof(queue_select));
queue_select.vsi_id = vf->vsi_res->vsi_id;
- queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
- queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+ queue_select.rx_queues = BIT(adapter->dev_data->nb_rx_queues) - 1;
+ queue_select.tx_queues = BIT(adapter->dev_data->nb_tx_queues) - 1;
args.ops = VIRTCHNL_OP_ENABLE_QUEUES;
args.in_args = (u8 *)&queue_select;
@@ -560,8 +559,8 @@ iavf_disable_queues(struct iavf_adapter *adapter)
memset(&queue_select, 0, sizeof(queue_select));
queue_select.vsi_id = vf->vsi_res->vsi_id;
- queue_select.rx_queues = BIT(adapter->eth_dev->data->nb_rx_queues) - 1;
- queue_select.tx_queues = BIT(adapter->eth_dev->data->nb_tx_queues) - 1;
+ queue_select.rx_queues = BIT(adapter->dev_data->nb_rx_queues) - 1;
+ queue_select.tx_queues = BIT(adapter->dev_data->nb_tx_queues) - 1;
args.ops = VIRTCHNL_OP_DISABLE_QUEUES;
args.in_args = (u8 *)&queue_select;
@@ -631,12 +630,12 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter)
queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
- adapter->eth_dev->data->nb_tx_queues;
+ adapter->dev_data->nb_tx_queues;
queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
- adapter->eth_dev->data->nb_rx_queues;
+ adapter->dev_data->nb_rx_queues;
args.ops = VIRTCHNL_OP_ENABLE_QUEUES_V2;
args.in_args = (u8 *)queue_select;
@@ -675,12 +674,12 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter)
queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].type = VIRTCHNL_QUEUE_TYPE_TX;
queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].start_queue_id = 0;
queue_chunk[VIRTCHNL_QUEUE_TYPE_TX].num_queues =
- adapter->eth_dev->data->nb_tx_queues;
+ adapter->dev_data->nb_tx_queues;
queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].type = VIRTCHNL_QUEUE_TYPE_RX;
queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].start_queue_id = 0;
queue_chunk[VIRTCHNL_QUEUE_TYPE_RX].num_queues =
- adapter->eth_dev->data->nb_rx_queues;
+ adapter->dev_data->nb_rx_queues;
args.ops = VIRTCHNL_OP_DISABLE_QUEUES_V2;
args.in_args = (u8 *)queue_select;
@@ -811,9 +810,9 @@ iavf_configure_queues(struct iavf_adapter *adapter,
uint16_t num_queue_pairs, uint16_t index)
{
struct iavf_rx_queue **rxq =
- (struct iavf_rx_queue **)adapter->eth_dev->data->rx_queues;
+ (struct iavf_rx_queue **)adapter->dev_data->rx_queues;
struct iavf_tx_queue **txq =
- (struct iavf_tx_queue **)adapter->eth_dev->data->tx_queues;
+ (struct iavf_tx_queue **)adapter->dev_data->tx_queues;
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
struct virtchnl_vsi_queue_config_info *vc_config;
struct virtchnl_queue_pair_info *vc_qp;
@@ -837,7 +836,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
vc_qp->txq.queue_id = i;
/* Virtchnnl configure tx queues by pairs */
- if (i < adapter->eth_dev->data->nb_tx_queues) {
+ if (i < adapter->dev_data->nb_tx_queues) {
vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
}
@@ -846,7 +845,7 @@ iavf_configure_queues(struct iavf_adapter *adapter,
vc_qp->rxq.queue_id = i;
vc_qp->rxq.max_pkt_size = vf->max_pkt_len;
- if (i >= adapter->eth_dev->data->nb_rx_queues)
+ if (i >= adapter->dev_data->nb_rx_queues)
continue;
/* Virtchnnl configure rx queues by pairs */
@@ -915,7 +914,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
return -ENOMEM;
map_info->num_vectors = vf->nb_msix;
- for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) {
+ for (i = 0; i < adapter->dev_data->nb_rx_queues; i++) {
vecmap =
&map_info->vecmap[vf->qv_map[i].vector_id - vf->msix_base];
vecmap->vsi_id = vf->vsi_res->vsi_id;
@@ -994,7 +993,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
j = 0;
len = sizeof(struct virtchnl_ether_addr_list);
for (i = begin; i < IAVF_NUM_MACADDR_MAX; i++, next_begin++) {
- addr = &adapter->eth_dev->data->mac_addrs[i];
+ addr = &adapter->dev_data->mac_addrs[i];
if (rte_is_zero_ether_addr(addr))
continue;
len += sizeof(struct virtchnl_ether_addr);
@@ -1011,7 +1010,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
}
for (i = begin; i < next_begin; i++) {
- addr = &adapter->eth_dev->data->mac_addrs[i];
+ addr = &adapter->dev_data->mac_addrs[i];
if (rte_is_zero_ether_addr(addr))
continue;
rte_memcpy(list->list[j].addr, addr->addr_bytes,
@@ -1397,9 +1396,10 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
}
int
-iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
+iavf_request_queues(struct rte_eth_dev *dev, uint16_t num)
{
- struct rte_eth_dev *dev = adapter->eth_dev;
+ struct iavf_adapter *adapter =
+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct virtchnl_vf_res_request vfres;
--
2.27.0
* Re: [PATCH 20.11 v2] net/iavf: fix multi-process shared data
From: Xueming(Steven) Li @ 2021-11-27 14:27 UTC
To: dapengx.yu, stable
On Thu, 2021-11-11 at 13:48 +0800, dapengx.yu@intel.com wrote:
> From: Dapeng Yu <dapengx.yu@intel.com>
>
> [ upstream commit 435d523112ccef6cb58edba3062ea05a2f6544fc ]
>
> When the iavf_adapter instance is not completely initialized in the
> primary process and the secondary process accesses its "rte_eth_dev"
> member, the secondary process crashes.
>
> This patch replaces eth_dev with eth_dev_data in iavf_adapter.
>
> Fixes: f978c1c9b3b5 ("net/iavf: add RSS hash parsing in AVX path")
> Fixes: 9c9aa0040344 ("net/iavf: add offload path for Rx AVX512 flex descriptor")
> Fixes: 63660ea3ee0b ("net/iavf: add RSS hash parsing in SSE path")
>
> Signed-off-by: Dapeng Yu <dapengx.yu@intel.com>
> ---
> V2:
> * Simplify parameters of iavf_request_queues
> * Clean commit log
> ---
Applied to 20.11.4 list, thanks!