From: Jerin Jacob <jerinjacobk@gmail.com>
To: Ashwin Sekhar T K <asekhar@marvell.com>,
Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,
Ferruh Yigit <ferruh.yigit@intel.com>
Cc: dpdk-dev <dev@dpdk.org>,
Nithin Dabilpuram <ndabilpuram@marvell.com>,
Jerin Jacob <jerinj@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Koteswara Rao Kottidi <skoteshwar@marvell.com>,
Pavan Nikhilesh <pbhagavatula@marvell.com>,
Kiran Kumar K <kirankumark@marvell.com>,
Satheesh Paul <psatheesh@marvell.com>,
Anoob Joseph <anoobj@marvell.com>,
Akhil Goyal <gakhil@marvell.com>
Subject: Re: [PATCH] common/cnxk: update cpu directive in NPA assembly code
Date: Thu, 6 Jan 2022 19:33:40 +0530 [thread overview]
Message-ID: <CALBAE1PyY5m47ZUe52uDskHuXonXbe19BcKSXNw669rou+entw@mail.gmail.com> (raw)
In-Reply-To: <20211130054809.2697001-1-asekhar@marvell.com>
On Tue, Nov 30, 2021 at 11:20 AM Ashwin Sekhar T K <asekhar@marvell.com> wrote:
>
> Update the CPU directive in ROC NPA assembly code snippets.
>
> Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
Update the git commit as
common/cnxk: update CPU directive in NPA assembly code
Use the .arch_extension directive instead of the .cpu directive in NPA
assembly code snippets. Using the .cpu directive with "generic" causes
it to override the micro-architecture selected by -march/-mcpu.
For example, if -march=armv8.5-a+crypto+sve2 is provided, then the .cpu
directive overrides it to generic+crypto+sve2; use .arch_extension
to get the expected result.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Applied to dpdk-next-net-mrvl/for-next-net. Thanks
> ---
> drivers/common/cnxk/roc_npa.h | 30 +++++++++++++-----------------
> 1 file changed, 13 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
> index 46350fdb48..aeadc3d5e2 100644
> --- a/drivers/common/cnxk/roc_npa.h
> +++ b/drivers/common/cnxk/roc_npa.h
> @@ -433,7 +433,7 @@ roc_npa_aura_bulk_alloc(uint64_t aura_handle, uint64_t *buf, unsigned int num,
> switch (num) {
> case 30:
> asm volatile(
> - ".cpu generic+lse\n"
> + ".arch_extension lse\n"
> "mov v18.d[0], %[dst]\n"
> "mov v18.d[1], %[loc]\n"
> "mov v19.d[0], %[wdata]\n"
> @@ -497,7 +497,7 @@ roc_npa_aura_bulk_alloc(uint64_t aura_handle, uint64_t *buf, unsigned int num,
> break;
> case 16:
> asm volatile(
> - ".cpu generic+lse\n"
> + ".arch_extension lse\n"
> "mov x16, %[wdata]\n"
> "mov x17, %[wdata]\n"
> "casp x0, x1, x16, x17, [%[loc]]\n"
> @@ -517,15 +517,14 @@ roc_npa_aura_bulk_alloc(uint64_t aura_handle, uint64_t *buf, unsigned int num,
> "stp x12, x13, [%[dst], #96]\n"
> "stp x14, x15, [%[dst], #112]\n"
> :
> - : [wdata] "r" (wdata), [dst] "r" (buf), [loc] "r" (addr)
> + : [wdata] "r"(wdata), [dst] "r"(buf), [loc] "r"(addr)
> : "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6",
> "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14",
> - "x15", "x16", "x17"
> - );
> + "x15", "x16", "x17");
> break;
> case 8:
> asm volatile(
> - ".cpu generic+lse\n"
> + ".arch_extension lse\n"
> "mov x16, %[wdata]\n"
> "mov x17, %[wdata]\n"
> "casp x0, x1, x16, x17, [%[loc]]\n"
> @@ -537,14 +536,13 @@ roc_npa_aura_bulk_alloc(uint64_t aura_handle, uint64_t *buf, unsigned int num,
> "stp x4, x5, [%[dst], #32]\n"
> "stp x6, x7, [%[dst], #48]\n"
> :
> - : [wdata] "r" (wdata), [dst] "r" (buf), [loc] "r" (addr)
> + : [wdata] "r"(wdata), [dst] "r"(buf), [loc] "r"(addr)
> : "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6",
> - "x7", "x16", "x17"
> - );
> + "x7", "x16", "x17");
> break;
> case 4:
> asm volatile(
> - ".cpu generic+lse\n"
> + ".arch_extension lse\n"
> "mov x16, %[wdata]\n"
> "mov x17, %[wdata]\n"
> "casp x0, x1, x16, x17, [%[loc]]\n"
> @@ -552,21 +550,19 @@ roc_npa_aura_bulk_alloc(uint64_t aura_handle, uint64_t *buf, unsigned int num,
> "stp x0, x1, [%[dst]]\n"
> "stp x2, x3, [%[dst], #16]\n"
> :
> - : [wdata] "r" (wdata), [dst] "r" (buf), [loc] "r" (addr)
> - : "memory", "x0", "x1", "x2", "x3", "x16", "x17"
> - );
> + : [wdata] "r"(wdata), [dst] "r"(buf), [loc] "r"(addr)
> + : "memory", "x0", "x1", "x2", "x3", "x16", "x17");
> break;
> case 2:
> asm volatile(
> - ".cpu generic+lse\n"
> + ".arch_extension lse\n"
> "mov x16, %[wdata]\n"
> "mov x17, %[wdata]\n"
> "casp x0, x1, x16, x17, [%[loc]]\n"
> "stp x0, x1, [%[dst]]\n"
> :
> - : [wdata] "r" (wdata), [dst] "r" (buf), [loc] "r" (addr)
> - : "memory", "x0", "x1", "x16", "x17"
> - );
> + : [wdata] "r"(wdata), [dst] "r"(buf), [loc] "r"(addr)
> + : "memory", "x0", "x1", "x16", "x17");
> break;
> case 1:
> buf[0] = roc_npa_aura_op_alloc(aura_handle, drop);
> --
> 2.32.0
>
prev parent reply other threads:[~2022-01-06 14:04 UTC|newest]
Thread overview: 2+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-11-30 5:48 Ashwin Sekhar T K
2022-01-06 14:03 ` Jerin Jacob [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=CALBAE1PyY5m47ZUe52uDskHuXonXbe19BcKSXNw669rou+entw@mail.gmail.com \
--to=jerinjacobk@gmail.com \
--cc=andrew.rybchenko@oktetlabs.ru \
--cc=anoobj@marvell.com \
--cc=asekhar@marvell.com \
--cc=dev@dpdk.org \
--cc=ferruh.yigit@intel.com \
--cc=gakhil@marvell.com \
--cc=jerinj@marvell.com \
--cc=kirankumark@marvell.com \
--cc=ndabilpuram@marvell.com \
--cc=pbhagavatula@marvell.com \
--cc=psatheesh@marvell.com \
--cc=skori@marvell.com \
--cc=skoteshwar@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).