From: dpdklab@iol.unh.edu
To: test-report@dpdk.org
Cc: dpdk-test-reports@iol.unh.edu
Subject: |WARNING| pw111138-111137 [PATCH] [v3, 3/3] event/cnxk: implement event port quiesce function
Date: Fri, 13 May 2022 14:19:03 -0400 (EDT)	[thread overview]
Message-ID: <20220513181903.192386D50B@noxus.dpdklab.iol.unh.edu> (raw)


Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/111138

_apply patch failure_

Submitter: Pavan Nikhilesh Bhagavatula <pbhagavatula@marvell.com>
Date: Friday, May 13 2022 17:58:41 
Applied on: CommitID:c0c305ee9e0e7c9feca6412266a778f330d20c19
Apply patch set 111138-111137 failed:

Checking patch app/test-eventdev/test_perf_common.c...
error: while searching for:
	evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
}

void
perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
		    uint8_t port_id, struct rte_event events[], uint16_t nb_enq,

error: patch failed: app/test-eventdev/test_perf_common.c:985
error: while searching for:
			events[i].op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
	}
}

void

error: patch failed: app/test-eventdev/test_perf_common.c:1000
Checking patch app/test-eventdev/test_pipeline_common.c...
error: while searching for:
	}
}

void
pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
			uint16_t enq, uint16_t deq)

error: patch failed: app/test-eventdev/test_pipeline_common.c:518
error: while searching for:

		rte_event_enqueue_burst(dev, port, ev, deq);
	}
}

void

error: patch failed: app/test-eventdev/test_pipeline_common.c:542
Checking patch examples/eventdev_pipeline/pipeline_common.h...
error: while searching for:
	}
}

static inline void
worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
	       uint16_t nb_enq, uint16_t nb_deq)

error: patch failed: examples/eventdev_pipeline/pipeline_common.h:140
error: while searching for:
			events[i].op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
	}
}

void set_worker_generic_setup_data(struct setup_data *caps, bool burst);

error: patch failed: examples/eventdev_pipeline/pipeline_common.h:160
Checking patch examples/ipsec-secgw/ipsec_worker.c...
error: while searching for:
		rte_event_enqueue_burst(links[0].eventdev_id,
					links[0].event_port_id, &ev, 1);
	}
}

/*

error: patch failed: examples/ipsec-secgw/ipsec_worker.c:861
error: while searching for:
		rte_event_enqueue_burst(links[0].eventdev_id,
					links[0].event_port_id, &ev, 1);
	}
}

static uint8_t

error: patch failed: examples/ipsec-secgw/ipsec_worker.c:974
Checking patch examples/l2fwd-event/l2fwd_common.c...
error: while searching for:
	}
}

void
l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
			   struct rte_event events[], uint16_t nb_enq,

error: patch failed: examples/l2fwd-event/l2fwd_common.c:128
error: while searching for:
			events[i].op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(event_d_id, port_id, events, nb_deq);
	}
}

error: patch failed: examples/l2fwd-event/l2fwd_common.c:147
Checking patch examples/l3fwd/l3fwd_event.c...
error: while searching for:
	}
}

void
l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
			   struct rte_event events[], uint16_t nb_enq,

error: patch failed: examples/l3fwd/l3fwd_event.c:301
error: while searching for:
			events[i].op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(event_d_id, event_p_id, events, nb_deq);
	}
}

error: patch failed: examples/l3fwd/l3fwd_event.c:320
Applying patch app/test-eventdev/test_perf_common.c with 2 rejects...
Rejected hunk #1.
Rejected hunk #2.
Applying patch app/test-eventdev/test_pipeline_common.c with 2 rejects...
Rejected hunk #1.
Rejected hunk #2.
Applying patch examples/eventdev_pipeline/pipeline_common.h with 2 rejects...
Rejected hunk #1.
Rejected hunk #2.
Applying patch examples/ipsec-secgw/ipsec_worker.c with 2 rejects...
Hunk #1 applied cleanly.
Rejected hunk #2.
Rejected hunk #3.
Applying patch examples/l2fwd-event/l2fwd_common.c with 2 rejects...
Rejected hunk #1.
Rejected hunk #2.
Applying patch examples/l3fwd/l3fwd_event.c with 2 rejects...
Rejected hunk #1.
Rejected hunk #2.
diff a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c	(rejected hunks)
@@ -985,6 +985,13 @@ perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
 	evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
 }
 
+static void
+perf_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+		      void *args)
+{
+	rte_mempool_put(args, ev.event_ptr);
+}
+
 void
 perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
 		    uint8_t port_id, struct rte_event events[], uint16_t nb_enq,
@@ -1000,6 +1007,7 @@ perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
 			events[i].op = RTE_EVENT_OP_RELEASE;
 		rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
 	}
+	rte_event_port_quiesce(dev_id, port_id, perf_event_port_flush, pool);
 }
 
 void
diff a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c	(rejected hunks)
@@ -518,6 +518,16 @@ pipeline_vector_array_free(struct rte_event events[], uint16_t num)
 	}
 }
 
+static void
+pipeline_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+			  void *args __rte_unused)
+{
+	if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+		pipeline_vector_array_free(&ev, 1);
+	else
+		rte_pktmbuf_free(ev.mbuf);
+}
+
 void
 pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
 			uint16_t enq, uint16_t deq)
@@ -542,6 +552,8 @@ pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
 
 		rte_event_enqueue_burst(dev, port, ev, deq);
 	}
+
+	rte_event_port_quiesce(dev, port, pipeline_event_port_flush, NULL);
 }
 
 void
diff a/examples/eventdev_pipeline/pipeline_common.h b/examples/eventdev_pipeline/pipeline_common.h	(rejected hunks)
@@ -140,6 +140,13 @@ schedule_devices(unsigned int lcore_id)
 	}
 }
 
+static void
+event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+		 void *args __rte_unused)
+{
+	rte_mempool_put(args, ev.event_ptr);
+}
+
 static inline void
 worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
 	       uint16_t nb_enq, uint16_t nb_deq)
@@ -160,6 +167,8 @@ worker_cleanup(uint8_t dev_id, uint8_t port_id, struct rte_event events[],
 			events[i].op = RTE_EVENT_OP_RELEASE;
 		rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
 	}
+
+	rte_event_port_quiesce(dev_id, port_id, event_port_flush, NULL);
 }
 
 void set_worker_generic_setup_data(struct setup_data *caps, bool burst);
diff a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c	(rejected hunks)
@@ -861,6 +868,9 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
 		rte_event_enqueue_burst(links[0].eventdev_id,
 					links[0].event_port_id, &ev, 1);
 	}
+
+	rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
+			       ipsec_event_port_flush, NULL);
 }
 
 /*
@@ -974,6 +984,9 @@ ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
 		rte_event_enqueue_burst(links[0].eventdev_id,
 					links[0].event_port_id, &ev, 1);
 	}
+
+	rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
+			       ipsec_event_port_flush, NULL);
 }
 
 static uint8_t
diff a/examples/l2fwd-event/l2fwd_common.c b/examples/l2fwd-event/l2fwd_common.c	(rejected hunks)
@@ -128,6 +128,16 @@ l2fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
 	}
 }
 
+static void
+l2fwd_event_port_flush(uint8_t event_d_id __rte_unused, struct rte_event ev,
+		       void *args __rte_unused)
+{
+	if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+		l2fwd_event_vector_array_free(&ev, 1);
+	else
+		rte_pktmbuf_free(ev.mbuf);
+}
+
 void
 l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
 			   struct rte_event events[], uint16_t nb_enq,
@@ -147,4 +157,7 @@ l2fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t port_id,
 			events[i].op = RTE_EVENT_OP_RELEASE;
 		rte_event_enqueue_burst(event_d_id, port_id, events, nb_deq);
 	}
+
+	rte_event_port_quiesce(event_d_id, port_id, l2fwd_event_port_flush,
+			       NULL);
 }
diff a/examples/l3fwd/l3fwd_event.c b/examples/l3fwd/l3fwd_event.c	(rejected hunks)
@@ -301,6 +301,16 @@ l3fwd_event_vector_array_free(struct rte_event events[], uint16_t num)
 	}
 }
 
+static void
+l3fwd_event_port_flush(uint8_t event_d_id __rte_unused, struct rte_event ev,
+		       void *args __rte_unused)
+{
+	if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+		l3fwd_event_vector_array_free(&ev, 1);
+	else
+		rte_pktmbuf_free(ev.mbuf);
+}
+
 void
 l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
 			   struct rte_event events[], uint16_t nb_enq,
@@ -320,4 +330,7 @@ l3fwd_event_worker_cleanup(uint8_t event_d_id, uint8_t event_p_id,
 			events[i].op = RTE_EVENT_OP_RELEASE;
 		rte_event_enqueue_burst(event_d_id, event_p_id, events, nb_deq);
 	}
+
+	rte_event_port_quiesce(event_d_id, event_p_id, l3fwd_event_port_flush,
+			       NULL);
 }

https://lab.dpdk.org/results/dashboard/patchsets/22111/

UNH-IOL DPDK Community Lab
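
For anyone triaging this failure, a minimal way to reproduce it locally (a sketch only; the mbox/patch file names below are hypothetical, and the series would first need to be downloaded from patchwork) is:

  # Check out the baseline the lab applied against (CommitID above).
  git checkout c0c305ee9e0e7c9feca6412266a778f330d20c19

  # Attempt a 3-way apply of the series mbox; -3 lets git fall back to a
  # blob-level merge when the context lines have drifted, which is the
  # usual cause of "error: while searching for" rejects.
  git am -3 series-111138.mbox

  # Or apply a single patch and keep the failed hunks as .rej files for
  # manual inspection.
  git apply --reject v3-3-3-event-cnxk-event-port-quiesce.patch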
