DPDK patches and discussions
* [PATCH v1] net/ice: update null check for TxPP support
@ 2025-06-27  0:20 Soumyadeep Hore
  2025-06-27  0:43 ` [PATCH v2] " Soumyadeep Hore
  2025-06-27  9:10 ` [PATCH v1] " Bruce Richardson
  0 siblings, 2 replies; 3+ messages in thread
From: Soumyadeep Hore @ 2025-06-27  0:20 UTC (permalink / raw)
  To: dev, bruce.richardson; +Cc: aman.deep.singh, manoj.kumar.subbarao

The absence of a NULL check on txq->tsq was causing a segmentation fault.
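
For illustration, a minimal, self-contained sketch of the failure mode and
of the guard added below. It assumes txq->tsq is left NULL when Tx
time-based scheduling (TxPP) has not been configured for the queue; the
structure definitions are stand-ins, only the tsq and ts_flag names are
taken from the diff.

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-in types; the real ones live in the ice driver headers. */
    struct ts_queue { int ts_flag; };
    struct tx_queue { struct ts_queue *tsq; };

    int main(void)
    {
            struct tx_queue txq = { .tsq = NULL }; /* TxPP not configured */

            /* Pre-fix pattern: txq.tsq->ts_flag dereferences a NULL
             * pointer here and crashes. */

            /* Post-fix pattern: C short-circuit evaluation skips the
             * dereference whenever the pointer test fails. */
            if (txq.tsq != NULL && txq.tsq->ts_flag > 0)
                    printf("Tx timestamp queue enabled\n");
            else
                    printf("Tx timestamp queue absent or disabled\n");

            return 0;
    }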

Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
 drivers/net/intel/ice/ice_rxtx.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/net/intel/ice/ice_rxtx.c b/drivers/net/intel/ice/ice_rxtx.c
index e33fd74543..de2902bba4 100644
--- a/drivers/net/intel/ice/ice_rxtx.c
+++ b/drivers/net/intel/ice/ice_rxtx.c
@@ -865,7 +865,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	/* record what kind of descriptor cleanup we need on teardown */
 	txq->vector_tx = ad->tx_vec_allowed;
 
-	if (txq->tsq->ts_flag > 0) {
+	if (txq->tsq && txq->tsq->ts_flag > 0) {
 		struct ice_aqc_set_txtime_qgrp *ts_elem;
 		u8 ts_buf_len = ice_struct_size(ts_elem, txtimeqs, 1);
 		struct ice_txtime_ctx txtime_ctx = { 0 };
@@ -1118,7 +1118,7 @@ ice_reset_tx_queue(struct ci_tx_queue *txq)
 	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
 
-	if (txq->tsq->ts_flag > 0) {
+	if (txq->tsq && txq->tsq->ts_flag > 0) {
 		for (i = 0; i < txq->tsq->nb_ts_desc; i++) {
 			volatile struct ice_ts_desc *tsd =
 							&txq->tsq->ice_ts_ring[i];
@@ -1161,7 +1161,7 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	q_ids[0] = txq->reg_idx;
 	q_teids[0] = txq->q_teid;
 
-	if (txq->tsq->ts_flag > 0) {
+	if (txq->tsq && txq->tsq->ts_flag > 0) {
 		struct ice_aqc_ena_dis_txtime_qgrp txtime_pg;
 		dev->dev_ops->timesync_disable(dev);
 		status = ice_aq_ena_dis_txtimeq(hw, q_ids[0], 1, 0,
@@ -3162,7 +3162,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	tx_id = txq->tx_tail;
 	txe = &sw_ring[tx_id];
 
-	if (txq->tsq->ts_flag > 0)
+	if (txq->tsq && txq->tsq->ts_flag > 0)
 		ts_id = txq->tsq->ts_tail;
 
 	/* Check if the descriptor ring needs to be cleaned. */
@@ -3353,7 +3353,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
 					 ICE_TXD_QW1_CMD_S);
 
-		if (txq->tsq->ts_flag > 0) {
+		if (txq->tsq && txq->tsq->ts_flag > 0) {
 			uint64_t txtime = *RTE_MBUF_DYNFIELD(tx_pkt,
 					txq->tsq->ts_offset, uint64_t *);
 			uint32_t tstamp = (uint32_t)(txtime % NS_PER_S) >>
@@ -3383,7 +3383,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	}
 end_of_tx:
 	/* update Tail register */
-	if (txq->tsq->ts_flag > 0) {
+	if (txq->tsq && txq->tsq->ts_flag > 0) {
 		ICE_PCI_REG_WRITE(txq->qtx_tail, ts_id);
 		txq->tsq->ts_tail = ts_id;
 	} else {
-- 
2.43.0



* [PATCH v2] net/ice: update null check for TxPP support
  2025-06-27  0:20 [PATCH v1] net/ice: update null check for TxPP support Soumyadeep Hore
@ 2025-06-27  0:43 ` Soumyadeep Hore
  2025-06-27  9:10 ` [PATCH v1] " Bruce Richardson
  1 sibling, 0 replies; 3+ messages in thread
From: Soumyadeep Hore @ 2025-06-27  0:43 UTC (permalink / raw)
  To: dev, bruce.richardson; +Cc: aman.deep.singh, manoj.kumar.subbarao

The absence of a NULL check on txq->tsq was causing a segmentation fault.

Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
 drivers/net/intel/ice/ice_rxtx.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/net/intel/ice/ice_rxtx.c b/drivers/net/intel/ice/ice_rxtx.c
index e33fd74543..814d8793d2 100644
--- a/drivers/net/intel/ice/ice_rxtx.c
+++ b/drivers/net/intel/ice/ice_rxtx.c
@@ -865,7 +865,7 @@ ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	/* record what kind of descriptor cleanup we need on teardown */
 	txq->vector_tx = ad->tx_vec_allowed;
 
-	if (txq->tsq->ts_flag > 0) {
+	if (txq->tsq != NULL && txq->tsq->ts_flag > 0) {
 		struct ice_aqc_set_txtime_qgrp *ts_elem;
 		u8 ts_buf_len = ice_struct_size(ts_elem, txtimeqs, 1);
 		struct ice_txtime_ctx txtime_ctx = { 0 };
@@ -1118,7 +1118,7 @@ ice_reset_tx_queue(struct ci_tx_queue *txq)
 	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
 
-	if (txq->tsq->ts_flag > 0) {
+	if (txq->tsq != NULL && txq->tsq->ts_flag > 0) {
 		for (i = 0; i < txq->tsq->nb_ts_desc; i++) {
 			volatile struct ice_ts_desc *tsd =
 							&txq->tsq->ice_ts_ring[i];
@@ -1161,7 +1161,7 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	q_ids[0] = txq->reg_idx;
 	q_teids[0] = txq->q_teid;
 
-	if (txq->tsq->ts_flag > 0) {
+	if (txq->tsq != NULL && txq->tsq->ts_flag > 0) {
 		struct ice_aqc_ena_dis_txtime_qgrp txtime_pg;
 		dev->dev_ops->timesync_disable(dev);
 		status = ice_aq_ena_dis_txtimeq(hw, q_ids[0], 1, 0,
@@ -3162,7 +3162,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	tx_id = txq->tx_tail;
 	txe = &sw_ring[tx_id];
 
-	if (txq->tsq->ts_flag > 0)
+	if (txq->tsq != NULL && txq->tsq->ts_flag > 0)
 		ts_id = txq->tsq->ts_tail;
 
 	/* Check if the descriptor ring needs to be cleaned. */
@@ -3353,7 +3353,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			rte_cpu_to_le_64(((uint64_t)td_cmd) <<
 					 ICE_TXD_QW1_CMD_S);
 
-		if (txq->tsq->ts_flag > 0) {
+		if (txq->tsq != NULL && txq->tsq->ts_flag > 0) {
 			uint64_t txtime = *RTE_MBUF_DYNFIELD(tx_pkt,
 					txq->tsq->ts_offset, uint64_t *);
 			uint32_t tstamp = (uint32_t)(txtime % NS_PER_S) >>
@@ -3383,7 +3383,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	}
 end_of_tx:
 	/* update Tail register */
-	if (txq->tsq->ts_flag > 0) {
+	if (txq->tsq != NULL && txq->tsq->ts_flag > 0) {
 		ICE_PCI_REG_WRITE(txq->qtx_tail, ts_id);
 		txq->tsq->ts_tail = ts_id;
 	} else {
-- 
2.43.0



* Re: [PATCH v1] net/ice: update null check for TxPP support
  2025-06-27  0:20 [PATCH v1] net/ice: update null check for TxPP support Soumyadeep Hore
  2025-06-27  0:43 ` [PATCH v2] " Soumyadeep Hore
@ 2025-06-27  9:10 ` Bruce Richardson
  1 sibling, 0 replies; 3+ messages in thread
From: Bruce Richardson @ 2025-06-27  9:10 UTC (permalink / raw)
  To: Soumyadeep Hore; +Cc: dev, aman.deep.singh, manoj.kumar.subbarao

On Fri, Jun 27, 2025 at 12:20:44AM +0000, Soumyadeep Hore wrote:
> The absence of a NULL check on txq->tsq was causing a segmentation fault.
> 
> Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
> ---
>  drivers/net/intel/ice/ice_rxtx.c | 12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
> 
Patch applied and squashed to original commit in next-net-intel tree.

Thanks,
/Bruce


