DPDK patches and discussions
From: Haiyue Wang <haiyue.wang@intel.com>
To: dev@dpdk.org, xiaolong.ye@intel.com, qi.z.zhang@intel.com,
	qiming.yang@intel.com
Cc: Haiyue Wang <haiyue.wang@intel.com>
Subject: [dpdk-dev] [PATCH v3 4/4] net/ice/base: osdep.h clean up
Date: Wed, 15 Jan 2020 08:50:28 +0800
Message-ID: <20200115005028.21026-5-haiyue.wang@intel.com>
In-Reply-To: <20200115005028.21026-1-haiyue.wang@intel.com>

Remove the unused definitions, rewrite the I/O read/write helpers, and
gather the common RTE-based definitions under the
__INTEL_NET_BASE_OSDEP__ guard, so that they act as the shared OS (RTE)
dependency layer.

Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
---
 drivers/net/ice/base/ice_osdep.h | 132 ++++++++++++++++---------------
 1 file changed, 67 insertions(+), 65 deletions(-)
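
The __INTEL_NET_BASE_OSDEP__ guard is meant to let this header and the
companion iavf base osdep.h (patch 3/4) carry an identical common block,
so a build that pulls in both headers still compiles a single copy of
the RTE-based definitions. A minimal sketch of that shared-guard
pattern, assuming both headers duplicate the same block:

    /* Same layout in each Intel net base osdep header: whichever header
     * is included first defines the common block, the other one skips it.
     */
    #ifndef __INTEL_NET_BASE_OSDEP__
    #define __INTEL_NET_BASE_OSDEP__

    /* typedefs, byte-order helpers, readl()/writel(), rd32()/wr32(), ... */

    #endif /* __INTEL_NET_BASE_OSDEP__ */

A minimal usage sketch of the reworked register helpers, assuming the
ice base headers are included, "hw" points to a struct ice_hw whose
hw_addr is already mapped, and SOME_CTL_REG is a hypothetical register
offset used only for illustration:

    /* Illustration only: exercises the new rd32()/wr32() wrappers. */
    static void
    example_reg_access(struct ice_hw *hw)
    {
        uint32_t stat;

        /* rd32() -> readl(hw->hw_addr + reg): 32-bit MMIO read followed
         * by the little-endian to CPU conversion done in readl().
         */
        stat = rd32(hw, GLGEN_STAT);

        /* wr32() -> writel(value, hw->hw_addr + reg): CPU to little-endian
         * conversion, then the 32-bit MMIO write.
         */
        wr32(hw, SOME_CTL_REG, stat | 0x1);

        /* Read-back to flush posted writes, as ice_flush() does. */
        ice_flush(hw);
    }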

diff --git a/drivers/net/ice/base/ice_osdep.h b/drivers/net/ice/base/ice_osdep.h
index 27c1830c5..45b9f3617 100644
--- a/drivers/net/ice/base/ice_osdep.h
+++ b/drivers/net/ice/base/ice_osdep.h
@@ -26,6 +26,9 @@
 
 #include "../ice_logs.h"
 
+#ifndef __INTEL_NET_BASE_OSDEP__
+#define __INTEL_NET_BASE_OSDEP__
+
 #define INLINE inline
 #define STATIC static
 
@@ -38,17 +41,6 @@ typedef int32_t         s32;
 typedef uint64_t        u64;
 typedef uint64_t        s64;
 
-#define __iomem
-#define hw_dbg(hw, S, A...) do {} while (0)
-#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
-#define lower_32_bits(n) ((u32)(n))
-#define low_16_bits(x)   ((x) & 0xFFFF)
-#define high_16_bits(x)  (((x) & 0xFFFF0000) >> 16)
-
-#ifndef ETH_ADDR_LEN
-#define ETH_ADDR_LEN                  6
-#endif
-
 #ifndef __le16
 #define __le16          uint16_t
 #endif
@@ -68,6 +60,65 @@ typedef uint64_t        s64;
 #define __be64          uint64_t
 #endif
 
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
+
+#define FIELD_SIZEOF(t, f) RTE_SIZEOF_FIELD(t, f)
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
+
+#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
+#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
+#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
+#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
+#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
+#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
+
+#define CPU_TO_BE16(o) rte_cpu_to_be_16(o)
+#define CPU_TO_BE32(o) rte_cpu_to_be_32(o)
+#define CPU_TO_BE64(o) rte_cpu_to_be_64(o)
+
+#define NTOHS(a) rte_be_to_cpu_16(a)
+#define NTOHL(a) rte_be_to_cpu_32(a)
+#define HTONS(a) rte_cpu_to_be_16(a)
+#define HTONL(a) rte_cpu_to_be_32(a)
+
+static __rte_always_inline uint32_t
+readl(volatile void *addr)
+{
+	return rte_le_to_cpu_32(rte_read32(addr));
+}
+
+static __rte_always_inline void
+writel(uint32_t value, volatile void *addr)
+{
+	rte_write32(rte_cpu_to_le_32(value), addr);
+}
+
+static __rte_always_inline void
+writel_relaxed(uint32_t value, volatile void *addr)
+{
+	rte_write32_relaxed(rte_cpu_to_le_32(value), addr);
+}
+
+static __rte_always_inline uint64_t
+readq(volatile void *addr)
+{
+	return rte_le_to_cpu_64(rte_read64(addr));
+}
+
+static __rte_always_inline void
+writeq(uint64_t value, volatile void *addr)
+{
+	rte_write64(rte_cpu_to_le_64(value), addr);
+}
+
+#define wr32(a, reg, value) writel((value), (a)->hw_addr + (reg))
+#define rd32(a, reg)        readl((a)->hw_addr + (reg))
+#define wr64(a, reg, value) writeq((value), (a)->hw_addr + (reg))
+#define rd64(a, reg)        readq((a)->hw_addr + (reg))
+
+#endif /* __INTEL_NET_BASE_OSDEP__ */
+
 #ifndef __always_unused
 #define __always_unused  __attribute__((unused))
 #endif
@@ -82,21 +133,8 @@ typedef uint64_t        s64;
 #define BIT_ULL(a) (1ULL << (a))
 #endif
 
-#define FALSE           0
-#define TRUE            1
-#define false           0
-#define true            1
-
-#define min(a, b) RTE_MIN(a, b)
-#define max(a, b) RTE_MAX(a, b)
-
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
-#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))
 #define MAKEMASK(m, s) ((m) << (s))
 
-#define DEBUGOUT(S, A...) PMD_DRV_LOG_RAW(DEBUG, S, ##A)
-#define DEBUGFUNC(F) PMD_DRV_LOG_RAW(DEBUG, F)
-
 #define ice_debug(h, m, s, ...)					\
 do {								\
 	if (((m) & (h)->debug_mask))				\
@@ -123,37 +161,16 @@ do {									\
 #define SNPRINTF ice_snprintf
 #endif
 
-#define ICE_PCI_REG(reg)     rte_read32(reg)
-#define ICE_PCI_REG_ADDR(a, reg) \
-	((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
-#define ICE_PCI_REG64(reg)     rte_read64(reg)
-#define ICE_PCI_REG_ADDR64(a, reg) \
-	((volatile uint64_t *)((char *)(a)->hw_addr + (reg)))
-static inline uint32_t ice_read_addr(volatile void *addr)
-{
-	return rte_le_to_cpu_32(ICE_PCI_REG(addr));
-}
-
-static inline uint64_t ice_read_addr64(volatile void *addr)
-{
-	return rte_le_to_cpu_64(ICE_PCI_REG64(addr));
-}
+#define ICE_PCI_REG_WRITE(reg, value) writel(value, reg)
 
-#define ICE_PCI_REG_WRITE(reg, value) \
-	rte_write32((rte_cpu_to_le_32(value)), reg)
+#define ICE_READ_REG(hw, reg)         rd32(hw, reg)
+#define ICE_WRITE_REG(hw, reg, value) wr32(hw, reg, value)
 
 #define ice_flush(a)   ICE_READ_REG((a), GLGEN_STAT)
 #define icevf_flush(a) ICE_READ_REG((a), VFGEN_RSTAT)
-#define ICE_READ_REG(hw, reg) ice_read_addr(ICE_PCI_REG_ADDR((hw), (reg)))
-#define ICE_WRITE_REG(hw, reg, value) \
-	ICE_PCI_REG_WRITE(ICE_PCI_REG_ADDR((hw), (reg)), (value))
-
-#define rd32(a, reg) ice_read_addr(ICE_PCI_REG_ADDR((a), (reg)))
-#define wr32(a, reg, value) \
-	ICE_PCI_REG_WRITE(ICE_PCI_REG_ADDR((a), (reg)), (value))
-#define flush(a) ice_read_addr(ICE_PCI_REG_ADDR((a), (GLGEN_STAT)))
+
+#define flush(a) ICE_READ_REG((a), GLGEN_STAT)
 #define div64_long(n, d) ((n) / (d))
-#define rd64(a, reg) ice_read_addr64(ICE_PCI_REG_ADDR64((a), (reg)))
 
 #define BITS_PER_BYTE       8
 
@@ -178,21 +195,6 @@ struct ice_virt_mem {
 #define ice_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
 #define ice_memdup(a, b, c, d) rte_memcpy(ice_malloc(a, c), b, c)
 
-#define CPU_TO_BE16(o) rte_cpu_to_be_16(o)
-#define CPU_TO_BE32(o) rte_cpu_to_be_32(o)
-#define CPU_TO_BE64(o) rte_cpu_to_be_64(o)
-#define CPU_TO_LE16(o) rte_cpu_to_le_16(o)
-#define CPU_TO_LE32(s) rte_cpu_to_le_32(s)
-#define CPU_TO_LE64(h) rte_cpu_to_le_64(h)
-#define LE16_TO_CPU(a) rte_le_to_cpu_16(a)
-#define LE32_TO_CPU(c) rte_le_to_cpu_32(c)
-#define LE64_TO_CPU(k) rte_le_to_cpu_64(k)
-
-#define NTOHS(a) rte_be_to_cpu_16(a)
-#define NTOHL(a) rte_be_to_cpu_32(a)
-#define HTONS(a) rte_cpu_to_be_16(a)
-#define HTONL(a) rte_cpu_to_be_32(a)
-
 /* SW spinlock */
 struct ice_lock {
 	rte_spinlock_t spinlock;
-- 
2.17.1



Thread overview: 7+ messages
2020-01-15  0:50 [dpdk-dev] [PATCH v3 0/4] Intel iavf and ice PMDs " Haiyue Wang
2020-01-15  0:50 ` [dpdk-dev] [PATCH v3 1/4] net/iavf: unify the bool type value Haiyue Wang
2020-01-15  0:50 ` [dpdk-dev] [PATCH v3 2/4] net/ice: " Haiyue Wang
2020-01-15  0:50 ` [dpdk-dev] [PATCH v3 3/4] common/iavf: osdep.h clean up Haiyue Wang
2020-01-15  0:50 ` Haiyue Wang [this message]
2020-01-15 10:55 ` [dpdk-dev] [PATCH v3 0/4] Intel iavf and ice PMDs " Zhang, Qi Z
2020-01-16 18:20   ` Ferruh Yigit
