From: Thomas Monjalon <thomas@monjalon.net>
To: dev@dpdk.org
Cc: Jerin Jacob <jerinj@marvell.com>,
Qiming Yang <qiming.yang@intel.com>,
Wenzhuo Lu <wenzhuo.lu@intel.com>,
Bruce Richardson <bruce.richardson@intel.com>,
Konstantin Ananyev <konstantin.ananyev@intel.com>,
David Hunt <david.hunt@intel.com>
Subject: [dpdk-dev] [PATCH 20.05 07/15] replace always-inline attributes
Date: Mon, 10 Feb 2020 17:20:24 +0100
Message-ID: <20200210162032.1177478-8-thomas@monjalon.net>
In-Reply-To: <20200210162032.1177478-1-thomas@monjalon.net>
The macro __rte_always_inline forces functions to be inlined.
It is now used where appropriate, for consistency.
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
---
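Background note for reviewers: __rte_always_inline is provided by rte_common.h
and, roughly, expands to "inline __attribute__((always_inline))", so the change
below is purely mechanical. A minimal sketch of the before/after pattern,
using a hypothetical helper add_one() that is not part of this series:

    #include <rte_common.h>

    /* before this series, spelled out by hand:
     *     static inline __attribute__((always_inline)) int
     * after this series, using the common macro:
     */
    static __rte_always_inline int
    add_one(int x)
    {
            return x + 1; /* trivial body; only the declaration style matters */
    }
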
app/test-eventdev/test_order_atq.c | 2 +-
app/test-eventdev/test_order_common.h | 4 ++--
app/test-eventdev/test_order_queue.c | 2 +-
app/test-eventdev/test_perf_atq.c | 4 ++--
app/test-eventdev/test_perf_common.h | 4 ++--
app/test-eventdev/test_perf_queue.c | 4 ++--
drivers/net/ice/ice_rxtx.c | 2 +-
.../common/include/arch/x86/rte_rtm.h | 6 +++---
lib/librte_power/rte_power_empty_poll.c | 18 +++++++++---------
9 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index abccbccacb..3366cfce9a 100644
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -9,7 +9,7 @@
/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
order_atq_process_stage_0(struct rte_event *const ev)
{
ev->sub_event_type = 1; /* move to stage 1 (atomic) on the same queue */
diff --git a/app/test-eventdev/test_order_common.h b/app/test-eventdev/test_order_common.h
index 22a1cc8325..e0fe9c968a 100644
--- a/app/test-eventdev/test_order_common.h
+++ b/app/test-eventdev/test_order_common.h
@@ -62,7 +62,7 @@ order_nb_event_ports(struct evt_options *opt)
return evt_nr_active_lcores(opt->wlcores) + 1 /* producer */;
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
order_process_stage_1(struct test_order *const t,
struct rte_event *const ev, const uint32_t nb_flows,
uint32_t *const expected_flow_seq,
@@ -87,7 +87,7 @@ order_process_stage_1(struct test_order *const t,
rte_atomic64_sub(outstand_pkts, 1);
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
order_process_stage_invalid(struct test_order *const t,
struct rte_event *const ev)
{
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index 7ac570c730..495efd92f9 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -9,7 +9,7 @@
/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
order_queue_process_stage_0(struct rte_event *const ev)
{
ev->queue_id = 1; /* q1 atomic queue */
diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index d0241ec4ae..8fd51004ee 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -14,7 +14,7 @@ atq_nb_event_queues(struct evt_options *opt)
rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
atq_mark_fwd_latency(struct rte_event *const ev)
{
if (unlikely(ev->sub_event_type == 0)) {
@@ -24,7 +24,7 @@ atq_mark_fwd_latency(struct rte_event *const ev)
}
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
const uint8_t nb_stages)
{
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index d8fbee6d89..ff9705df88 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -91,7 +91,7 @@ struct perf_elt {
printf("%s(): lcore %d dev_id %d port=%d\n", __func__,\
rte_lcore_id(), dev, port)
-static inline __attribute__((always_inline)) int
+static __rte_always_inline int
perf_process_last_stage(struct rte_mempool *const pool,
struct rte_event *const ev, struct worker_data *const w,
void *bufs[], int const buf_sz, uint8_t count)
@@ -107,7 +107,7 @@ perf_process_last_stage(struct rte_mempool *const pool,
return count;
}
-static inline __attribute__((always_inline)) uint8_t
+static __rte_always_inline uint8_t
perf_process_last_stage_latency(struct rte_mempool *const pool,
struct rte_event *const ev, struct worker_data *const w,
void *bufs[], int const buf_sz, uint8_t count)
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index 29098580e7..f4ea3a795f 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -15,7 +15,7 @@ perf_queue_nb_event_queues(struct evt_options *opt)
return nb_prod * opt->nb_stages;
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
mark_fwd_latency(struct rte_event *const ev,
const uint8_t nb_stages)
{
@@ -26,7 +26,7 @@ mark_fwd_latency(struct rte_event *const ev,
}
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
const uint8_t nb_stages)
{
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index ce5b8e6ca3..045680533f 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -2655,7 +2655,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_tx;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
ice_tx_free_bufs(struct ice_tx_queue *txq)
{
struct ice_tx_entry *txep;
diff --git a/lib/librte_eal/common/include/arch/x86/rte_rtm.h b/lib/librte_eal/common/include/arch/x86/rte_rtm.h
index eb0f8e81e1..36bf49846f 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_rtm.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_rtm.h
@@ -25,7 +25,7 @@ extern "C" {
#define RTE_XABORT_NESTED (1 << 5)
#define RTE_XABORT_CODE(x) (((x) >> 24) & 0xff)
-static __attribute__((__always_inline__)) inline
+static __rte_always_inline
unsigned int rte_xbegin(void)
{
unsigned int ret = RTE_XBEGIN_STARTED;
@@ -34,7 +34,7 @@ unsigned int rte_xbegin(void)
return ret;
}
-static __attribute__((__always_inline__)) inline
+static __rte_always_inline
void rte_xend(void)
{
asm volatile(".byte 0x0f,0x01,0xd5" ::: "memory");
@@ -45,7 +45,7 @@ void rte_xend(void)
asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory"); \
} while (0)
-static __attribute__((__always_inline__)) inline
+static __rte_always_inline
int rte_xtest(void)
{
unsigned char out;
diff --git a/lib/librte_power/rte_power_empty_poll.c b/lib/librte_power/rte_power_empty_poll.c
index 0a8024ddca..70c07b1533 100644
--- a/lib/librte_power/rte_power_empty_poll.c
+++ b/lib/librte_power/rte_power_empty_poll.c
@@ -52,13 +52,13 @@ set_power_freq(int lcore_id, enum freq_val freq, bool specific_freq)
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
exit_training_state(struct priority_worker *poll_stats)
{
RTE_SET_USED(poll_stats);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
enter_training_state(struct priority_worker *poll_stats)
{
poll_stats->iter_counter = 0;
@@ -66,7 +66,7 @@ enter_training_state(struct priority_worker *poll_stats)
poll_stats->queue_state = TRAINING;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
enter_normal_state(struct priority_worker *poll_stats)
{
/* Clear the averages arrays and strs */
@@ -86,7 +86,7 @@ enter_normal_state(struct priority_worker *poll_stats)
poll_stats->thresh[HGH].threshold_percent = high_to_med_threshold;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
enter_busy_state(struct priority_worker *poll_stats)
{
memset(poll_stats->edpi_av, 0, sizeof(poll_stats->edpi_av));
@@ -101,14 +101,14 @@ enter_busy_state(struct priority_worker *poll_stats)
set_power_freq(poll_stats->lcore_id, HGH, false);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
enter_purge_state(struct priority_worker *poll_stats)
{
poll_stats->iter_counter = 0;
poll_stats->queue_state = LOW_PURGE;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
set_state(struct priority_worker *poll_stats,
enum queue_state new_state)
{
@@ -131,7 +131,7 @@ set_state(struct priority_worker *poll_stats,
}
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
set_policy(struct priority_worker *poll_stats,
struct ep_policy *policy)
{
@@ -204,7 +204,7 @@ update_training_stats(struct priority_worker *poll_stats,
}
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
update_stats(struct priority_worker *poll_stats)
{
uint64_t tot_edpi = 0, tot_ppi = 0;
@@ -249,7 +249,7 @@ update_stats(struct priority_worker *poll_stats)
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
update_stats_normal(struct priority_worker *poll_stats)
{
uint32_t percent;
--
2.25.0