DPDK patches and discussions
From: zhoumin <zhoumin@loongson.cn>
To: thomas@monjalon.net, david.marchand@redhat.com,
	bruce.richardson@intel.com, anatoly.burakov@intel.com,
	qiming.yang@intel.com, Yuying.Zhang@intel.com,
	jgrajcia@cisco.com, konstantin.v.ananyev@yandex.ru
Cc: dev@dpdk.org, maobibo@loongson.cn
Subject: Re: [v3 01/24] eal/loongarch: add atomic operations for LoongArch
Date: Wed, 20 Jul 2022 18:16:38 +0800	[thread overview]
Message-ID: <84f55f02-12da-ea7d-ab0e-fd26086b997e@loongson.cn> (raw)
In-Reply-To: <20220606131054.2097526-2-zhoumin@loongson.cn>

Ping for review or feedback on this new arch support.

Thanks,
Min


On 2022/06/06 21:10, Min Zhou wrote:
> This patch adds architecture-specific atomic operations for the
> LoongArch architecture. The implementations use the toolchain's
> standard atomic builtins and are heavily based on the generic
> atomics code.
>
> Signed-off-by: Min Zhou <zhoumin@loongson.cn>
> ---
>   lib/eal/loongarch/include/rte_atomic.h | 253 +++++++++++++++++++++++++
>   1 file changed, 253 insertions(+)
>   create mode 100644 lib/eal/loongarch/include/rte_atomic.h
>
> diff --git a/lib/eal/loongarch/include/rte_atomic.h b/lib/eal/loongarch/include/rte_atomic.h
> new file mode 100644
> index 0000000000..8e007e7f76
> --- /dev/null
> +++ b/lib/eal/loongarch/include/rte_atomic.h
> @@ -0,0 +1,253 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2022 Loongson Technology Corporation Limited
> + */
> +
> +#ifndef _RTE_ATOMIC_LOONGARCH_H_
> +#define _RTE_ATOMIC_LOONGARCH_H_
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#include <stdint.h>
> +#include "generic/rte_atomic.h"
> +
> +/**
> + * LoongArch Synchronize
> + */
> +static inline void synchronize(void)
> +{
> +	__asm__ __volatile__("dbar 0":::"memory");
> +}
> +
> +/**
> + * General memory barrier.
> + *
> + * Guarantees that the LOAD and STORE operations generated before the
> + * barrier occur before the LOAD and STORE operations generated after.
> + * This function is architecture dependent.
> + */
> +#define rte_mb() synchronize()
> +
> +/**
> + * Write memory barrier.
> + *
> + * Guarantees that the STORE operations generated before the barrier
> + * occur before the STORE operations generated after.
> + * This function is architecture dependent.
> + */
> +#define rte_wmb() synchronize()
> +
> +/**
> + * Read memory barrier.
> + *
> + * Guarantees that the LOAD operations generated before the barrier
> + * occur before the LOAD operations generated after.
> + * This function is architecture dependent.
> + */
> +#define rte_rmb() synchronize()
> +
> +#define rte_smp_mb() rte_mb()
> +
> +#define rte_smp_wmb() rte_mb()
> +
> +#define rte_smp_rmb() rte_mb()
> +
> +#define rte_io_mb() rte_mb()
> +
> +#define rte_io_wmb() rte_mb()
> +
> +#define rte_io_rmb() rte_mb()
> +
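
[Not part of the patch: a minimal usage sketch of the barrier mapping above,
just to illustrate the ordering these macros are meant to provide on top of
dbar 0. The data/flag variables are made up for the example.]

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_pause.h>

static uint32_t data;          /* payload written by the producer */
static volatile uint32_t flag; /* raised once data is valid */

/* Producer: publish data, then raise the flag. rte_wmb() keeps the
 * data store visible before the flag store. */
static void producer(uint32_t value)
{
	data = value;
	rte_wmb();
	flag = 1;
}

/* Consumer: wait for the flag, then read data. rte_rmb() keeps the
 * flag load ordered before the data load. */
static uint32_t consumer(void)
{
	while (flag == 0)
		rte_pause();
	rte_rmb();
	return data;
}
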
> +static __rte_always_inline void
> +rte_atomic_thread_fence(int memorder)
> +{
> +	__atomic_thread_fence(memorder);
> +}
> +
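
[For reference only, not part of the patch: a sketch of how the fence wrapper
is typically paired with relaxed atomics using the C11 memory-order constants.]

#include <rte_atomic.h>

static int ready;
static int payload;

/* Writer: make the payload store visible before publishing the flag. */
static void publish(int v)
{
	payload = v;
	rte_atomic_thread_fence(__ATOMIC_RELEASE);
	__atomic_store_n(&ready, 1, __ATOMIC_RELAXED);
}

/* Reader: order the flag load before the subsequent payload load. */
static int consume(void)
{
	while (__atomic_load_n(&ready, __ATOMIC_RELAXED) == 0)
		;
	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
	return payload;
}
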
> +#ifndef RTE_FORCE_INTRINSICS
> +/*------------------------- 16 bit atomic operations -------------------------*/
> +static inline int
> +rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
> +{
> +	return __sync_bool_compare_and_swap(dst, exp, src);
> +}
> +
> +static inline uint16_t
> +rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
> +{
> +#if defined(__clang__)
> +	return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
> +#else
> +	return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
> +#endif
> +}
> +
> +static inline void
> +rte_atomic16_inc(rte_atomic16_t *v)
> +{
> +	rte_atomic16_add(v, 1);
> +}
> +
> +static inline void
> +rte_atomic16_dec(rte_atomic16_t *v)
> +{
> +	rte_atomic16_sub(v, 1);
> +}
> +
> +static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
> +{
> +	return __sync_add_and_fetch(&v->cnt, 1) == 0;
> +}
> +
> +static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
> +{
> +	return __sync_sub_and_fetch(&v->cnt, 1) == 0;
> +}
> +
> +static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
> +{
> +	return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
> +}
> +
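
[Illustrative only, not part of the patch: a tiny try-lock built on the 16-bit
helpers above. RTE_ATOMIC16_INIT() and rte_atomic16_clear() are assumed to come
from the generic header, as on other architectures.]

#include <rte_atomic.h>

static rte_atomic16_t lock16 = RTE_ATOMIC16_INIT(0);

/* Returns non-zero when the caller won the 0 -> 1 transition. */
static int try_lock(void)
{
	return rte_atomic16_test_and_set(&lock16);
}

static void unlock(void)
{
	rte_atomic16_clear(&lock16); /* store 0 to release */
}
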
> +/*------------------------- 32 bit atomic operations -------------------------*/
> +static inline int
> +rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
> +{
> +	return __sync_bool_compare_and_swap(dst, exp, src);
> +}
> +
> +static inline uint32_t
> +rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
> +{
> +#if defined(__clang__)
> +	return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
> +#else
> +	return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
> +#endif
> +}
> +
> +static inline void
> +rte_atomic32_inc(rte_atomic32_t *v)
> +{
> +	rte_atomic32_add(v, 1);
> +}
> +
> +static inline void
> +rte_atomic32_dec(rte_atomic32_t *v)
> +{
> +	rte_atomic32_sub(v, 1);
> +}
> +
> +static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
> +{
> +	return __sync_add_and_fetch(&v->cnt, 1) == 0;
> +}
> +
> +static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
> +{
> +	return __sync_sub_and_fetch(&v->cnt, 1) == 0;
> +}
> +
> +static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
> +{
> +	return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
> +}
> +
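
[Again illustrative only: a compare-and-set retry loop on rte_atomic32_cmpset(),
here applying a saturating increment to a shared word; the variable and bound
are made up.]

#include <stdint.h>
#include <rte_atomic.h>

static volatile uint32_t shared_word;

/* Retry until the CAS succeeds, i.e. nobody changed the word between the
 * read of 'old' and the rte_atomic32_cmpset() call. */
static void saturating_inc(uint32_t max)
{
	uint32_t old, newval;

	do {
		old = shared_word;
		newval = (old < max) ? old + 1 : max;
	} while (rte_atomic32_cmpset(&shared_word, old, newval) == 0);
}
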
> +/*------------------------- 64 bit atomic operations -------------------------*/
> +static inline int
> +rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
> +{
> +	return __sync_bool_compare_and_swap(dst, exp, src);
> +}
> +
> +static inline uint64_t
> +rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
> +{
> +#if defined(__clang__)
> +	return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
> +#else
> +	return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
> +#endif
> +}
> +
> +static inline void
> +rte_atomic64_init(rte_atomic64_t *v)
> +{
> +	v->cnt = 0;
> +}
> +
> +static inline int64_t
> +rte_atomic64_read(rte_atomic64_t *v)
> +{
> +	return v->cnt;
> +}
> +
> +static inline void
> +rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
> +{
> +	v->cnt = new_value;
> +}
> +
> +static inline void
> +rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
> +{
> +	__sync_fetch_and_add(&v->cnt, inc);
> +}
> +
> +static inline void
> +rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
> +{
> +	__sync_fetch_and_sub(&v->cnt, dec);
> +}
> +
> +static inline void
> +rte_atomic64_inc(rte_atomic64_t *v)
> +{
> +	rte_atomic64_add(v, 1);
> +}
> +
> +static inline void
> +rte_atomic64_dec(rte_atomic64_t *v)
> +{
> +	rte_atomic64_sub(v, 1);
> +}
> +
> +static inline int64_t
> +rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
> +{
> +	return __sync_add_and_fetch(&v->cnt, inc);
> +}
> +
> +static inline int64_t
> +rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
> +{
> +	return __sync_sub_and_fetch(&v->cnt, dec);
> +}
> +
> +static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
> +{
> +	return rte_atomic64_add_return(v, 1) == 0;
> +}
> +
> +static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
> +{
> +	return rte_atomic64_sub_return(v, 1) == 0;
> +}
> +
> +static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
> +{
> +	return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
> +}
> +
> +static inline void rte_atomic64_clear(rte_atomic64_t *v)
> +{
> +	rte_atomic64_set(v, 0);
> +}
> +#endif
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_ATOMIC_LOONGARCH_H_ */
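
[For completeness, a short usage sketch of the 64-bit counter helpers defined
in the patch; illustrative only. The counter name is made up and
RTE_ATOMIC64_INIT() comes from the generic header.]

#include <rte_atomic.h>

static rte_atomic64_t pkt_count = RTE_ATOMIC64_INIT(0);

/* Datapath thread: bump the counter per received burst. */
static void on_burst(int64_t nb_pkts)
{
	rte_atomic64_add(&pkt_count, nb_pkts);
}

/* Stats thread: read the counter without modifying it. */
static int64_t stats_read(void)
{
	return rte_atomic64_read(&pkt_count);
}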



Thread overview: 28+ messages
2022-06-06 13:10 [v3 00/24] Support LoongArch architecture Min Zhou
2022-06-06 13:10 ` [v3 01/24] eal/loongarch: add atomic operations for LoongArch Min Zhou
2022-07-20 10:16   ` zhoumin [this message]
2022-06-06 13:10 ` [v3 02/24] eal/loongarch: add byte order " Min Zhou
2022-06-06 13:10 ` [v3 03/24] eal/loongarch: add cpu cycle " Min Zhou
2022-06-06 13:10 ` [v3 04/24] eal/loongarch: add prefetch " Min Zhou
2022-06-06 13:10 ` [v3 05/24] eal/loongarch: add spinlock " Min Zhou
2022-06-06 13:10 ` [v3 06/24] eal/loongarch: add cpu flag checks " Min Zhou
2022-06-06 13:10 ` [v3 07/24] eal/loongarch: add dummy vector memcpy " Min Zhou
2022-06-06 13:10 ` [v3 08/24] eal/loongarch: add io operations " Min Zhou
2022-06-06 13:10 ` [v3 09/24] eal/loongarch: add mcslock " Min Zhou
2022-06-06 13:10 ` [v3 10/24] eal/loongarch: add pause " Min Zhou
2022-06-06 13:10 ` [v3 11/24] eal/loongarch: add pflock " Min Zhou
2022-06-06 13:10 ` [v3 12/24] eal/loongarch: add rwlock " Min Zhou
2022-06-06 13:10 ` [v3 13/24] eal/loongarch: add ticketlock " Min Zhou
2022-06-06 13:10 ` [v3 14/24] eal/loongarch: add power " Min Zhou
2022-06-06 13:10 ` [v3 15/24] eal/loongarch: add hypervisor " Min Zhou
2022-06-06 13:10 ` [v3 16/24] mem: add huge page size definition " Min Zhou
2022-06-06 13:10 ` [v3 17/24] eal/linux: set eal base address " Min Zhou
2022-06-06 13:10 ` [v3 18/24] meson: introduce LoongArch architecture Min Zhou
2022-06-06 13:10 ` [v3 19/24] test/xmmt_ops: add dummy vector implementation for LoongArch Min Zhou
2022-06-06 13:10 ` [v3 20/24] ixgbe: " Min Zhou
2022-06-06 13:10 ` [v3 21/24] i40e: " Min Zhou
2022-06-06 13:10 ` [v3 22/24] tap: add system call number " Min Zhou
2022-06-06 13:10 ` [v3 23/24] memif: " Min Zhou
2022-06-06 13:10 ` [v3 24/24] maintainers: claim responsibility " Min Zhou
2022-07-20 16:33 ` [v3 00/24] Support LoongArch architecture David Marchand
2022-07-21 10:33   ` zhoumin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=84f55f02-12da-ea7d-ab0e-fd26086b997e@loongson.cn \
    --to=zhoumin@loongson.cn \
    --cc=Yuying.Zhang@intel.com \
    --cc=anatoly.burakov@intel.com \
    --cc=bruce.richardson@intel.com \
    --cc=david.marchand@redhat.com \
    --cc=dev@dpdk.org \
    --cc=jgrajcia@cisco.com \
    --cc=konstantin.v.ananyev@yandex.ru \
    --cc=maobibo@loongson.cn \
    --cc=qiming.yang@intel.com \
    --cc=thomas@monjalon.net \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see the mirroring instructions for how to clone and
mirror all data and code used for this inbox, as well as URLs for NNTP newsgroup(s).