DPDK patches and discussions
* [dpdk-dev] [PATCH] event/sw: remove stale IQ references when reconfigured
@ 2018-01-09 16:19 Gage Eads
  2018-01-09 17:29 ` Van Haaren, Harry
  2018-01-10 10:26 ` Pavan Nikhilesh
  0 siblings, 2 replies; 4+ messages in thread
From: Gage Eads @ 2018-01-09 16:19 UTC (permalink / raw)
  To: dev; +Cc: jerin.jacob, harry.van.haaren, pbhagavatula, santosh.shukla

This commit fixes a bug in which, when the sw PMD is reconfigured, it would
leave stale IQ chunk pointers in each queue's IQ structure. Now, the PMD
initializes all IQs at eventdev start time and releases all IQ chunk
pointers at eventdev stop time (which has the consequence that any events
in a queue when the eventdev is stopped will be lost). This approach should
be resilient to any reconfiguration done between the stop and start, such
as adding or removing queues.
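
For context, a minimal sketch (illustrative only, not part of the diff below) of
the application-level stop/reconfigure/start cycle this change is meant to
survive; the device id, the config change, and the queue/port re-setup are
placeholders:

    #include <rte_eventdev.h>

    /* Illustrative only: stop the running device, reconfigure it (e.g. with
     * one more queue), and restart it. Before this fix, the second call to
     * rte_event_dev_configure() freed the IQ chunk memory while each queue
     * still held pointers into the old allocation.
     */
    static int
    reconfigure_with_extra_queue(uint8_t dev_id,
                                 struct rte_event_dev_config *cfg)
    {
            int ret;

            rte_event_dev_stop(dev_id);     /* sw_stop(): IQ chunk refs released */

            cfg->nb_event_queues++;         /* placeholder topology change */
            ret = rte_event_dev_configure(dev_id, cfg);
            if (ret < 0)
                    return ret;

            /* Re-run rte_event_queue_setup()/rte_event_port_setup() and
             * rte_event_port_link() for the new topology here.
             */

            return rte_event_dev_start(dev_id); /* sw_start(): IQs re-initialized */
    }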

This commit also fixes two potential issues in iq_chunk.h. iq_init()
now initializes the IQ's count field to 0, and iq_dequeue_burst() sets
iq->head to the appropriate next pointer.
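
For reference, an approximate sketch of the data structures involved (field
names as they appear in the diff below; the types and chunk size are
illustrative -- see iq_chunk.h and sw_evdev.h for the real definitions):

    #include <stdint.h>
    #include <rte_eventdev.h>

    #define SW_EVS_PER_Q_CHUNK 255  /* illustrative; the driver defines its own */

    /* Each IQ is a singly linked list of fixed-size chunks of events. */
    struct sw_queue_chunk {
            struct rte_event events[SW_EVS_PER_Q_CHUNK];
            struct sw_queue_chunk *next;
    };

    struct sw_iq {
            struct sw_queue_chunk *head;  /* chunk currently dequeued from */
            struct sw_queue_chunk *tail;  /* chunk currently enqueued to */
            uint16_t head_idx;            /* next slot to read in head */
            uint16_t tail_idx;            /* next slot to fill in tail */
            uint16_t count;               /* events in the IQ; must restart
                                           * at 0 on every (re)init */
    };

    /* When a head chunk is drained in iq_dequeue_burst(), the list has to
     * advance through the drained chunk's own next pointer (current->next);
     * iq->head->next is not safe to read there, since the walk (current) may
     * have moved past -- and freed -- the original head chunk by that point.
     */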

Fixes: 5b5e476e59a4 ("event/sw: use dynamically-sized IQs")
Signed-off-by: Gage Eads <gage.eads@intel.com>
---
 drivers/event/sw/iq_chunk.h | 14 +++++++++-
 drivers/event/sw/sw_evdev.c | 62 ++++++++++++++++++++++++++++++++-------------
 2 files changed, 57 insertions(+), 19 deletions(-)

diff --git a/drivers/event/sw/iq_chunk.h b/drivers/event/sw/iq_chunk.h
index 29f5a35..2fa724c 100644
--- a/drivers/event/sw/iq_chunk.h
+++ b/drivers/event/sw/iq_chunk.h
@@ -73,12 +73,24 @@ iq_free_chunk(struct sw_evdev *sw, struct sw_queue_chunk *chunk)
 }
 
 static __rte_always_inline void
+iq_free_chunk_list(struct sw_evdev *sw, struct sw_queue_chunk *head)
+{
+	while (head) {
+		struct sw_queue_chunk *next;
+		next = head->next;
+		iq_free_chunk(sw, head);
+		head = next;
+	}
+}
+
+static __rte_always_inline void
 iq_init(struct sw_evdev *sw, struct sw_iq *iq)
 {
 	iq->head = iq_alloc_chunk(sw);
 	iq->tail = iq->head;
 	iq->head_idx = 0;
 	iq->tail_idx = 0;
+	iq->count = 0;
 }
 
 static __rte_always_inline void
@@ -154,7 +166,7 @@ iq_dequeue_burst(struct sw_evdev *sw,
 
 done:
 	if (unlikely(index == SW_EVS_PER_Q_CHUNK)) {
-		struct sw_queue_chunk *next = iq->head->next;
+		struct sw_queue_chunk *next = current->next;
 		iq_free_chunk(sw, current);
 		iq->head = next;
 		iq->head_idx = 0;
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 1ef6340..7430a5d 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -245,9 +245,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 	char buf[IQ_ROB_NAMESIZE];
 	struct sw_qid *qid = &sw->qids[idx];
 
-	for (i = 0; i < SW_IQS_MAX; i++)
-		iq_init(sw, &qid->iq[i]);
-
 	/* Initialize the FID structures to no pinning (-1), and zero packets */
 	const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
 	for (i = 0; i < RTE_DIM(qid->fids); i++)
@@ -325,11 +322,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 	return 0;
 
 cleanup:
-	for (i = 0; i < SW_IQS_MAX; i++) {
-		if (qid->iq[i].head)
-			iq_free_chunk(sw, qid->iq[i].head);
-	}
-
 	if (qid->reorder_buffer) {
 		rte_free(qid->reorder_buffer);
 		qid->reorder_buffer = NULL;
@@ -348,13 +340,6 @@ sw_queue_release(struct rte_eventdev *dev, uint8_t id)
 {
 	struct sw_evdev *sw = sw_pmd_priv(dev);
 	struct sw_qid *qid = &sw->qids[id];
-	uint32_t i;
-
-	for (i = 0; i < SW_IQS_MAX; i++) {
-		if (!qid->iq[i].head)
-			continue;
-		iq_free_chunk(sw, qid->iq[i].head);
-	}
 
 	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
 		rte_free(qid->reorder_buffer);
@@ -388,6 +373,41 @@ sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 }
 
 static void
+sw_init_qid_iqs(struct sw_evdev *sw)
+{
+	int i, j;
+
+	/* Initialize the IQ memory of all configured qids */
+	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+		struct sw_qid *qid = &sw->qids[i];
+
+		if (!qid->initialized)
+			continue;
+
+		for (j = 0; j < SW_IQS_MAX; j++)
+			iq_init(sw, &qid->iq[j]);
+	}
+}
+
+static void
+sw_clean_qid_iqs(struct sw_evdev *sw)
+{
+	int i, j;
+
+	/* Release the IQ memory of all configured qids */
+	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+		struct sw_qid *qid = &sw->qids[i];
+
+		for (j = 0; j < SW_IQS_MAX; j++) {
+			if (!qid->iq[j].head)
+				continue;
+			iq_free_chunk_list(sw, qid->iq[j].head);
+			qid->iq[j].head = NULL;
+		}
+	}
+}
+
+static void
 sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 				 struct rte_event_queue_conf *conf)
 {
@@ -434,7 +454,10 @@ sw_dev_configure(const struct rte_eventdev *dev)
 	num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) +
 			sw->qid_count*SW_IQS_MAX*2;
 
-	/* If this is a reconfiguration, free the previous IQ allocation */
+	/* If this is a reconfiguration, free the previous IQ allocation. All
+	 * IQ chunk references were cleaned out of the QIDs in sw_stop(), and
+	 * will be reinitialized in sw_start().
+	 */
 	if (sw->chunks)
 		rte_free(sw->chunks);
 
@@ -667,8 +690,8 @@ sw_start(struct rte_eventdev *dev)
 
 	/* check all queues are configured and mapped to ports*/
 	for (i = 0; i < sw->qid_count; i++)
-		if (sw->qids[i].iq[0].head == NULL ||
-				sw->qids[i].cq_num_mapped_cqs == 0) {
+		if (!sw->qids[i].initialized ||
+		    sw->qids[i].cq_num_mapped_cqs == 0) {
 			SW_LOG_ERR("Queue %d not configured\n", i);
 			return -ENOLINK;
 		}
@@ -689,6 +712,8 @@ sw_start(struct rte_eventdev *dev)
 		}
 	}
 
+	sw_init_qid_iqs(sw);
+
 	if (sw_xstats_init(sw) < 0)
 		return -EINVAL;
 
@@ -702,6 +727,7 @@ static void
 sw_stop(struct rte_eventdev *dev)
 {
 	struct sw_evdev *sw = sw_pmd_priv(dev);
+	sw_clean_qid_iqs(sw);
 	sw_xstats_uninit(sw);
 	sw->started = 0;
 	rte_smp_wmb();
-- 
2.7.4


* Re: [dpdk-dev] [PATCH] event/sw: remove stale IQ references when reconfigured
  2018-01-09 16:19 [dpdk-dev] [PATCH] event/sw: remove stale IQ references when reconfigured Gage Eads
@ 2018-01-09 17:29 ` Van Haaren, Harry
  2018-01-10 10:26 ` Pavan Nikhilesh
  1 sibling, 0 replies; 4+ messages in thread
From: Van Haaren, Harry @ 2018-01-09 17:29 UTC (permalink / raw)
  To: Eads, Gage, dev; +Cc: jerin.jacob, pbhagavatula, santosh.shukla

> From: Eads, Gage
> Sent: Tuesday, January 9, 2018 4:20 PM
> To: dev@dpdk.org
> Cc: jerin.jacob@caviumnetworks.com; Van Haaren, Harry
> <harry.van.haaren@intel.com>; pbhagavatula@caviumnetworks.com;
> santosh.shukla@caviumnetworks.com
> Subject: [PATCH] event/sw: remove stale IQ references when reconfigured
> 
> This commit fixes a bug in which, when the sw PMD is reconfigured, it would
> leave stale IQ chunk pointers in each queue's IQ structure. Now, the PMD
> initializes all IQs at eventdev start time and releases all IQ chunk
> pointers at eventdev stop time (which has the consequence that any events
> in a queue when the eventdev is stopped will be lost). This approach should
> be resilient to any reconfiguration done between the stop and start, such
> as adding or removing queues.
> 
> This commit also fixes two potential issues in iq_chunk.h. iq_init()
> now initializes the IQ's count field to 0, and iq_dequeue_burst() sets
> iq->head to the appropriate next pointer.
> 
> Fixes: 5b5e476e59a4 ("event/sw: use dynamically-sized IQs")
> Signed-off-by: Gage Eads <gage.eads@intel.com>


After reproducing the error reliably, I can confirm this patch fixes it here.

Also, I think this patch deserves a Reported-by tag. Perhaps it can be added on apply?
Reported-By: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>

@Pavan, as the reporter, would you double check please?

With a +1 / Ack from Pavan, let's merge this :)
Reviewed-by: Harry van Haaren <harry.van.haaren@intel.com>


* Re: [dpdk-dev] [PATCH] event/sw: remove stale IQ references when reconfigured
  2018-01-09 16:19 [dpdk-dev] [PATCH] event/sw: remove stale IQ references when reconfigured Gage Eads
  2018-01-09 17:29 ` Van Haaren, Harry
@ 2018-01-10 10:26 ` Pavan Nikhilesh
  2018-01-10 19:00   ` Jerin Jacob
  1 sibling, 1 reply; 4+ messages in thread
From: Pavan Nikhilesh @ 2018-01-10 10:26 UTC (permalink / raw)
  To: Gage Eads, jerin.jacob, harry.van.haaren, santosh.shukla; +Cc: dev

On Tue, Jan 09, 2018 at 10:19:35AM -0600, Gage Eads wrote:
> This commit fixes a bug in which, when the sw PMD is reconfigured, it would
> leave stale IQ chunk pointers in each queue's IQ structure. Now, the PMD
> initializes all IQs at eventdev start time and releases all IQ chunk
> pointers at eventdev stop time (which has the consequence that any events
> in a queue when the eventdev is stopped will be lost). This approach should
> be resilient to any reconfiguration done between the stop and start, such
> as adding or removing queues.
>
> This commit also fixes two potential issues in iq_chunk.h. iq_init()
> now initializes the IQ's count field to 0, and iq_dequeue_burst() sets
> iq->head to the appropriate next pointer.
>
> Fixes: 5b5e476e59a4 ("event/sw: use dynamically-sized IQs")
> Signed-off-by: Gage Eads <gage.eads@intel.com>
> ---
>  drivers/event/sw/iq_chunk.h | 14 +++++++++-
>  drivers/event/sw/sw_evdev.c | 62 ++++++++++++++++++++++++++++++++-------------
>  2 files changed, 57 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/event/sw/iq_chunk.h b/drivers/event/sw/iq_chunk.h
> index 29f5a35..2fa724c 100644
> --- a/drivers/event/sw/iq_chunk.h
> +++ b/drivers/event/sw/iq_chunk.h
> @@ -73,12 +73,24 @@ iq_free_chunk(struct sw_evdev *sw, struct sw_queue_chunk *chunk)
>  }
>
>  static __rte_always_inline void
> +iq_free_chunk_list(struct sw_evdev *sw, struct sw_queue_chunk *head)
> +{
> +	while (head) {
> +		struct sw_queue_chunk *next;
> +		next = head->next;
> +		iq_free_chunk(sw, head);
> +		head = next;
> +	}
> +}
> +
> +static __rte_always_inline void
>  iq_init(struct sw_evdev *sw, struct sw_iq *iq)
>  {
>  	iq->head = iq_alloc_chunk(sw);
>  	iq->tail = iq->head;
>  	iq->head_idx = 0;
>  	iq->tail_idx = 0;
> +	iq->count = 0;
>  }
>
>  static __rte_always_inline void
> @@ -154,7 +166,7 @@ iq_dequeue_burst(struct sw_evdev *sw,
>
>  done:
>  	if (unlikely(index == SW_EVS_PER_Q_CHUNK)) {
> -		struct sw_queue_chunk *next = iq->head->next;
> +		struct sw_queue_chunk *next = current->next;
>  		iq_free_chunk(sw, current);
>  		iq->head = next;
>  		iq->head_idx = 0;
> diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
> index 1ef6340..7430a5d 100644
> --- a/drivers/event/sw/sw_evdev.c
> +++ b/drivers/event/sw/sw_evdev.c
> @@ -245,9 +245,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
>  	char buf[IQ_ROB_NAMESIZE];
>  	struct sw_qid *qid = &sw->qids[idx];
>
> -	for (i = 0; i < SW_IQS_MAX; i++)
> -		iq_init(sw, &qid->iq[i]);
> -
>  	/* Initialize the FID structures to no pinning (-1), and zero packets */
>  	const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
>  	for (i = 0; i < RTE_DIM(qid->fids); i++)
> @@ -325,11 +322,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
>  	return 0;
>
>  cleanup:
> -	for (i = 0; i < SW_IQS_MAX; i++) {
> -		if (qid->iq[i].head)
> -			iq_free_chunk(sw, qid->iq[i].head);
> -	}
> -
>  	if (qid->reorder_buffer) {
>  		rte_free(qid->reorder_buffer);
>  		qid->reorder_buffer = NULL;
> @@ -348,13 +340,6 @@ sw_queue_release(struct rte_eventdev *dev, uint8_t id)
>  {
>  	struct sw_evdev *sw = sw_pmd_priv(dev);
>  	struct sw_qid *qid = &sw->qids[id];
> -	uint32_t i;
> -
> -	for (i = 0; i < SW_IQS_MAX; i++) {
> -		if (!qid->iq[i].head)
> -			continue;
> -		iq_free_chunk(sw, qid->iq[i].head);
> -	}
>
>  	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
>  		rte_free(qid->reorder_buffer);
> @@ -388,6 +373,41 @@ sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
>  }
>
>  static void
> +sw_init_qid_iqs(struct sw_evdev *sw)
> +{
> +	int i, j;
> +
> +	/* Initialize the IQ memory of all configured qids */
> +	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
> +		struct sw_qid *qid = &sw->qids[i];
> +
> +		if (!qid->initialized)
> +			continue;
> +
> +		for (j = 0; j < SW_IQS_MAX; j++)
> +			iq_init(sw, &qid->iq[j]);
> +	}
> +}
> +
> +static void
> +sw_clean_qid_iqs(struct sw_evdev *sw)
> +{
> +	int i, j;
> +
> +	/* Release the IQ memory of all configured qids */
> +	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
> +		struct sw_qid *qid = &sw->qids[i];
> +
> +		for (j = 0; j < SW_IQS_MAX; j++) {
> +			if (!qid->iq[j].head)
> +				continue;
> +			iq_free_chunk_list(sw, qid->iq[j].head);
> +			qid->iq[j].head = NULL;
> +		}
> +	}
> +}
> +
> +static void
>  sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
>  				 struct rte_event_queue_conf *conf)
>  {
> @@ -434,7 +454,10 @@ sw_dev_configure(const struct rte_eventdev *dev)
>  	num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) +
>  			sw->qid_count*SW_IQS_MAX*2;
>
> -	/* If this is a reconfiguration, free the previous IQ allocation */
> +	/* If this is a reconfiguration, free the previous IQ allocation. All
> +	 * IQ chunk references were cleaned out of the QIDs in sw_stop(), and
> +	 * will be reinitialized in sw_start().
> +	 */
>  	if (sw->chunks)
>  		rte_free(sw->chunks);
>
> @@ -667,8 +690,8 @@ sw_start(struct rte_eventdev *dev)
>
>  	/* check all queues are configured and mapped to ports*/
>  	for (i = 0; i < sw->qid_count; i++)
> -		if (sw->qids[i].iq[0].head == NULL ||
> -				sw->qids[i].cq_num_mapped_cqs == 0) {
> +		if (!sw->qids[i].initialized ||
> +		    sw->qids[i].cq_num_mapped_cqs == 0) {
>  			SW_LOG_ERR("Queue %d not configured\n", i);
>  			return -ENOLINK;
>  		}
> @@ -689,6 +712,8 @@ sw_start(struct rte_eventdev *dev)
>  		}
>  	}
>
> +	sw_init_qid_iqs(sw);
> +
>  	if (sw_xstats_init(sw) < 0)
>  		return -EINVAL;
>
> @@ -702,6 +727,7 @@ static void
>  sw_stop(struct rte_eventdev *dev)
>  {
>  	struct sw_evdev *sw = sw_pmd_priv(dev);
> +	sw_clean_qid_iqs(sw);
>  	sw_xstats_uninit(sw);
>  	sw->started = 0;
>  	rte_smp_wmb();
> --
> 2.7.4
>

Sw eventdev works well alongside Rx adapter now.
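
For anyone wanting to re-run that combination, a rough sketch of wiring the Rx
adapter to the sw eventdev (API names per rte_event_eth_rx_adapter.h; the ids,
port conf, and queue values below are placeholders -- adjust to your setup):

    #include <rte_eventdev.h>
    #include <rte_event_eth_rx_adapter.h>

    /* Assumes the ethdev and the sw eventdev are already configured elsewhere;
     * the 0 adapter id and conf values are placeholders.
     */
    static int
    attach_rx_adapter(uint8_t evdev_id, uint16_t ethdev_id, uint8_t target_queue,
                      struct rte_event_port_conf *port_conf)
    {
            struct rte_event_eth_rx_adapter_queue_conf qconf = {
                    .rx_queue_flags = 0,
                    .servicing_weight = 1,
                    .ev.queue_id = target_queue,
                    .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
            };
            uint8_t adapter_id = 0;
            int ret;

            ret = rte_event_eth_rx_adapter_create(adapter_id, evdev_id, port_conf);
            if (ret < 0)
                    return ret;

            /* -1: feed all Rx queues of this ethdev into the adapter */
            ret = rte_event_eth_rx_adapter_queue_add(adapter_id, ethdev_id, -1,
                                                     &qconf);
            if (ret < 0)
                    return ret;

            return rte_event_eth_rx_adapter_start(adapter_id);
    }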

Cheers,
Pavan.

Acked-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>


* Re: [dpdk-dev] [PATCH] event/sw: remove stale IQ references when reconfigured
  2018-01-10 10:26 ` Pavan Nikhilesh
@ 2018-01-10 19:00   ` Jerin Jacob
  0 siblings, 0 replies; 4+ messages in thread
From: Jerin Jacob @ 2018-01-10 19:00 UTC (permalink / raw)
  To: Pavan Nikhilesh; +Cc: Gage Eads, harry.van.haaren, santosh.shukla, dev

-----Original Message-----
> Date: Wed, 10 Jan 2018 15:56:16 +0530
> From: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> To: Gage Eads <gage.eads@intel.com>, jerin.jacob@caviumnetworks.com,
>  harry.van.haaren@intel.com, santosh.shukla@caviumnetworks.com
> Cc: dev@dpdk.org
> Subject: Re: [PATCH] event/sw: remove stale IQ references when reconfigured
> User-Agent: NeoMutt/20170609 (1.8.3)
> 
> On Tue, Jan 09, 2018 at 10:19:35AM -0600, Gage Eads wrote:
> > This commit fixes a bug in which, when the sw PMD is reconfigured, it would
> > leave stale IQ chunk pointers in each queue's IQ structure. Now, the PMD
> > initializes all IQs at eventdev start time and releases all IQ chunk
> > pointers at eventdev stop time (which has the consequence that any events
> > in a queue when the eventdev is stopped will be lost). This approach should
> > be resilient to any reconfiguration done between the stop and start, such
> > as adding or removing queues.
> >
> > This commit also fixes two potential issues in iq_chunk.h. iq_init()
> > now initializes the IQ's count field to 0, and iq_dequeue_burst() sets
> > iq->head to the appropriate next pointer.
> >
> > Fixes: 5b5e476e59a4 ("event/sw: use dynamically-sized IQs")
> > Signed-off-by: Gage Eads <gage.eads@intel.com>
> Sw eventdev works well alongside Rx adapter now.
> 
> Cheers,
> Pavan.
> 
> Acked-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>

Applied to dpdk-next-eventdev/master. Thanks.
