* [PATCH 1/1] net/octeon_ep: use devarg to enable ISM accesses
@ 2024-02-23 15:39 Vamsi Attunuru
2024-02-25 15:39 ` Jerin Jacob
2024-02-26 8:59 ` [PATCH v2 " Vamsi Attunuru
From: Vamsi Attunuru @ 2024-02-23 15:39 UTC (permalink / raw)
To: dev; +Cc: jerinj, vattunuru
Adds a devarg option to enable/disable ISM memory accesses
for reading packet count details. This option is disabled
by default, as ISM memory accesses affect the throughput of
larger packets.
Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
---
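The tradeoff behind the option can be pictured with a minimal,
standalone C sketch (illustrative names only, not the driver's
internals): with ISM the device DMAs the counter into host memory, so a
poll is a cheap local read, but each poll also posts a doorbell write
over PCIe, which can compete with packet DMA at high byte rates.

    #include <stdint.h>

    /* Mode 0 (default): the counter lives in a BAR-mapped register,
     * so every poll is an MMIO read that crosses the PCIe bus.
     */
    static uint32_t
    poll_count_reg(volatile uint32_t *hw_reg)
    {
            return *hw_reg;
    }

    /* Mode 1 (ism_enable=1): the device DMAs the counter into plain
     * host memory; the poll reads local RAM and then writes a doorbell
     * requesting the next ISM update from the device.
     */
    static uint32_t
    poll_count_ism(volatile uint32_t *ism_mem, volatile uint64_t *doorbell,
                   uint64_t request)
    {
            uint32_t val = *ism_mem;

            *doorbell = request;
            return val;
    }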
doc/guides/nics/octeon_ep.rst | 12 ++++++++
drivers/net/octeon_ep/cnxk_ep_rx.h | 42 +++++++++++++++++++++-----
drivers/net/octeon_ep/cnxk_ep_tx.c | 42 ++++++++++++++++++++++----
drivers/net/octeon_ep/cnxk_ep_vf.c | 4 +--
drivers/net/octeon_ep/otx2_ep_vf.c | 4 +--
drivers/net/octeon_ep/otx_ep_common.h | 14 +++++++--
drivers/net/octeon_ep/otx_ep_ethdev.c | 43 +++++++++++++++++++++++++++
drivers/net/octeon_ep/otx_ep_rxtx.c | 15 ++++++----
drivers/net/octeon_ep/otx_ep_rxtx.h | 2 ++
9 files changed, 153 insertions(+), 25 deletions(-)
diff --git a/doc/guides/nics/octeon_ep.rst b/doc/guides/nics/octeon_ep.rst
index b5040aeee2..befa0a4097 100644
--- a/doc/guides/nics/octeon_ep.rst
+++ b/doc/guides/nics/octeon_ep.rst
@@ -11,6 +11,18 @@ and **Cavium OCTEON** families of adapters in SR-IOV context.
More information can be found at `Marvell Official Website
<https://www.marvell.com/content/dam/marvell/en/public-collateral/embedded-processors/marvell-liquidio-III-solutions-brief.pdf>`_.
+Runtime Config Options
+----------------------
+
+- ``Rx&Tx ISM memory accesses enable`` (default ``0``)
+
+ PMD supports 2 modes for checking Rx & Tx packet count: the PMD may read the packet count
+ directly from hardware registers, or it may read it from ISM memory. The mode is selected
+ at runtime using the ``ism_enable`` ``devargs`` parameter.
+
+ For example::
+
+ -a 0002:02:00.0,ism_enable=1
Prerequisites
-------------
diff --git a/drivers/net/octeon_ep/cnxk_ep_rx.h b/drivers/net/octeon_ep/cnxk_ep_rx.h
index 61263e651e..ecf95cd961 100644
--- a/drivers/net/octeon_ep/cnxk_ep_rx.h
+++ b/drivers/net/octeon_ep/cnxk_ep_rx.h
@@ -88,8 +88,9 @@ cnxk_ep_rx_refill(struct otx_ep_droq *droq)
}
static inline uint32_t
-cnxk_ep_check_rx_pkts(struct otx_ep_droq *droq)
+cnxk_ep_check_rx_ism_mem(void *rx_queue)
{
+ struct otx_ep_droq *droq = (struct otx_ep_droq *)rx_queue;
uint32_t new_pkts;
uint32_t val;
@@ -98,8 +99,9 @@ cnxk_ep_check_rx_pkts(struct otx_ep_droq *droq)
* number of PCIe writes.
*/
val = __atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED);
- new_pkts = val - droq->pkts_sent_ism_prev;
- droq->pkts_sent_ism_prev = val;
+
+ new_pkts = val - droq->pkts_sent_prev;
+ droq->pkts_sent_prev = val;
if (val > RTE_BIT32(31)) {
/* Only subtract the packet count in the HW counter
@@ -113,11 +115,34 @@ cnxk_ep_check_rx_pkts(struct otx_ep_droq *droq)
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
rte_mb();
}
-
- droq->pkts_sent_ism_prev = 0;
+ droq->pkts_sent_prev = 0;
}
+
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
- droq->pkts_pending += new_pkts;
+
+ return new_pkts;
+}
+
+static inline uint32_t
+cnxk_ep_check_rx_pkt_reg(void *rx_queue)
+{
+ struct otx_ep_droq *droq = (struct otx_ep_droq *)rx_queue;
+ uint32_t new_pkts;
+ uint32_t val;
+
+ val = rte_read32(droq->pkts_sent_reg);
+
+ new_pkts = val - droq->pkts_sent_prev;
+ droq->pkts_sent_prev = val;
+
+ if (val > RTE_BIT32(31)) {
+ /* Only subtract the packet count in the HW counter
+ * when count above halfway to saturation.
+ */
+ rte_write64((uint64_t)val, droq->pkts_sent_reg);
+ rte_mb();
+ droq->pkts_sent_prev = 0;
+ }
return new_pkts;
}
@@ -125,8 +150,11 @@ cnxk_ep_check_rx_pkts(struct otx_ep_droq *droq)
static inline int16_t __rte_hot
cnxk_ep_rx_pkts_to_process(struct otx_ep_droq *droq, uint16_t nb_pkts)
{
+ const otx_ep_check_pkt_count_t cnxk_rx_pkt_count[2] = { cnxk_ep_check_rx_pkt_reg,
+ cnxk_ep_check_rx_ism_mem};
+
if (droq->pkts_pending < nb_pkts)
- cnxk_ep_check_rx_pkts(droq);
+ droq->pkts_pending += cnxk_rx_pkt_count[droq->ism_ena](droq);
return RTE_MIN(nb_pkts, droq->pkts_pending);
}
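Both pollers above depend on the same unsigned-wrap bookkeeping; in
isolation the delta logic looks like this (a standalone sketch with
assumed-equivalent semantics, not the driver code):

    #include <stdint.h>

    struct cnt_state {
            uint32_t prev;  /* last value sampled from the HW counter */
    };

    /* Unsigned subtraction is exact modulo 2^32, so val - prev stays
     * correct even if the 32-bit counter wraps between polls.  Once the
     * counter passes half of its range, the hardware copy is reduced by
     * val (via reset_hw) and the local shadow restarts at zero, so the
     * counter never reaches saturation.
     */
    static uint32_t
    new_pkts_since_last_poll(struct cnt_state *s, uint32_t val,
                             void (*reset_hw)(uint32_t))
    {
            uint32_t new_pkts = val - s->prev;

            s->prev = val;
            if (val > (UINT32_C(1) << 31)) {
                    reset_hw(val);
                    s->prev = 0;
            }
            return new_pkts;
    }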
diff --git a/drivers/net/octeon_ep/cnxk_ep_tx.c b/drivers/net/octeon_ep/cnxk_ep_tx.c
index 9f11a2f317..98c0a861c3 100644
--- a/drivers/net/octeon_ep/cnxk_ep_tx.c
+++ b/drivers/net/octeon_ep/cnxk_ep_tx.c
@@ -5,9 +5,10 @@
#include "cnxk_ep_vf.h"
#include "otx_ep_rxtx.h"
-static uint32_t
-cnxk_vf_update_read_index(struct otx_ep_instr_queue *iq)
+static inline uint32_t
+cnxk_ep_check_tx_ism_mem(void *tx_queue)
{
+ struct otx_ep_instr_queue *iq = (struct otx_ep_instr_queue *)tx_queue;
uint32_t val;
/* Batch subtractions from the HW counter to reduce PCIe traffic
@@ -15,8 +16,8 @@ cnxk_vf_update_read_index(struct otx_ep_instr_queue *iq)
* number of PCIe writes.
*/
val = __atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED);
- iq->inst_cnt += val - iq->inst_cnt_ism_prev;
- iq->inst_cnt_ism_prev = val;
+ iq->inst_cnt += val - iq->inst_cnt_prev;
+ iq->inst_cnt_prev = val;
if (val > (uint32_t)(1 << 31)) {
/* Only subtract the packet count in the HW counter
@@ -31,7 +32,7 @@ cnxk_vf_update_read_index(struct otx_ep_instr_queue *iq)
rte_mb();
}
- iq->inst_cnt_ism_prev = 0;
+ iq->inst_cnt_prev = 0;
}
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
@@ -41,13 +42,42 @@ cnxk_vf_update_read_index(struct otx_ep_instr_queue *iq)
return iq->inst_cnt & (iq->nb_desc - 1);
}
+static inline uint32_t
+cnxk_ep_check_tx_pkt_reg(void *tx_queue)
+{
+ struct otx_ep_instr_queue *iq = (struct otx_ep_instr_queue *)tx_queue;
+ uint32_t val;
+
+ val = rte_read32(iq->inst_cnt_reg);
+ iq->inst_cnt += val - iq->inst_cnt_prev;
+ iq->inst_cnt_prev = val;
+
+ if (val > (uint32_t)(1 << 31)) {
+ /* Only subtract the packet count in the HW counter
+ * when count above halfway to saturation.
+ */
+ rte_write64((uint64_t)val, iq->inst_cnt_reg);
+ rte_mb();
+
+ iq->inst_cnt_prev = 0;
+ }
+
+ /* Modulo of the new index with the IQ size will give us
+ * the new index.
+ */
+ return iq->inst_cnt & (iq->nb_desc - 1);
+}
+
static inline void
cnxk_ep_flush_iq(struct otx_ep_instr_queue *iq)
{
+ const otx_ep_check_pkt_count_t cnxk_tx_pkt_count[2] = { cnxk_ep_check_tx_pkt_reg,
+ cnxk_ep_check_tx_ism_mem};
+
uint32_t instr_processed = 0;
uint32_t cnt = 0;
- iq->otx_read_index = cnxk_vf_update_read_index(iq);
+ iq->otx_read_index = cnxk_tx_pkt_count[iq->ism_ena](iq);
if (unlikely(iq->flush_index == iq->otx_read_index))
return;
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index ef275703c3..39f357ee81 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -155,7 +155,7 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%" PRIX64, iq_no,
(void *)iq->inst_cnt_ism, ism_addr);
*iq->inst_cnt_ism = 0;
- iq->inst_cnt_ism_prev = 0;
+ iq->inst_cnt_prev = 0;
iq->partial_ih = ((uint64_t)otx_ep->pkind) << 36;
return 0;
@@ -240,7 +240,7 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
otx_ep_err("SDP_R[%d] OQ ISM virt: %p dma: 0x%" PRIX64,
oq_no, (void *)droq->pkts_sent_ism, ism_addr);
*droq->pkts_sent_ism = 0;
- droq->pkts_sent_ism_prev = 0;
+ droq->pkts_sent_prev = 0;
loop = OTX_EP_BUSY_LOOP_COUNT;
while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 7f4edf8dcf..25e0e5a500 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -306,7 +306,7 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
(void *)iq->inst_cnt_ism,
(unsigned int)ism_addr);
*iq->inst_cnt_ism = 0;
- iq->inst_cnt_ism_prev = 0;
+ iq->inst_cnt_prev = 0;
iq->partial_ih = ((uint64_t)otx_ep->pkind) << 36;
return 0;
@@ -392,7 +392,7 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
(void *)droq->pkts_sent_ism,
(unsigned int)ism_addr);
*droq->pkts_sent_ism = 0;
- droq->pkts_sent_ism_prev = 0;
+ droq->pkts_sent_prev = 0;
loop = SDP_VF_BUSY_LOOP_COUNT;
while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index d64b04d2c2..7776940e1d 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -9,6 +9,7 @@
#include <unistd.h>
#include <assert.h>
#include <rte_eal.h>
+#include <rte_kvargs.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_io.h>
@@ -223,7 +224,7 @@ struct otx_ep_instr_queue {
uint8_t *base_addr;
/* track inst count locally to consolidate HW counter updates */
- uint32_t inst_cnt_ism_prev;
+ uint32_t inst_cnt_prev;
/* Input ring index, where the driver should write the next packet */
uint32_t host_write_index;
@@ -261,6 +262,9 @@ struct otx_ep_instr_queue {
/* Number of descriptors in this ring. */
uint32_t nb_desc;
+ /* Use ISM memory */
+ uint8_t ism_ena;
+
/* Size of the descriptor. */
uint8_t desc_size;
@@ -405,9 +409,12 @@ struct otx_ep_droq {
*/
void *pkts_sent_reg;
+ /* Use ISM memory */
+ uint8_t ism_ena;
+
/* Pointer to host memory copy of output packet count, set by ISM */
uint32_t *pkts_sent_ism;
- uint32_t pkts_sent_ism_prev;
+ uint32_t pkts_sent_prev;
/* Statistics for this DROQ. */
struct otx_ep_droq_stats stats;
@@ -565,6 +572,9 @@ struct otx_ep_device {
/* Generation */
uint32_t chip_gen;
+
+ /* Use ISM memory */
+ uint8_t ism_ena;
};
int otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no,
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 8daa7d225c..86ed6df6a9 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -15,6 +15,8 @@
#define OTX_EP_DEV(_eth_dev) \
((struct otx_ep_device *)(_eth_dev)->data->dev_private)
+#define OTX_ISM_ENABLE "ism_enable"
+
static const struct rte_eth_desc_lim otx_ep_rx_desc_lim = {
.nb_max = OTX_EP_MAX_OQ_DESCRIPTORS,
.nb_min = OTX_EP_MIN_OQ_DESCRIPTORS,
@@ -27,6 +29,41 @@ static const struct rte_eth_desc_lim otx_ep_tx_desc_lim = {
.nb_align = OTX_EP_TXD_ALIGN,
};
+static int
+parse_flag(const char *key, const char *value, void *extra_args)
+{
+ RTE_SET_USED(key);
+
+ *(uint8_t *)extra_args = atoi(value);
+
+ return 0;
+}
+
+static int
+otx_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx_ep_device *otx_epvf)
+{
+ struct rte_kvargs *kvlist;
+ uint8_t ism_enable = 0;
+
+ if (devargs == NULL)
+ goto null_devargs;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ goto exit;
+
+ rte_kvargs_process(kvlist, OTX_ISM_ENABLE, &parse_flag, &ism_enable);
+ rte_kvargs_free(kvlist);
+
+null_devargs:
+ otx_epvf->ism_ena = !!ism_enable;
+
+ return 0;
+
+exit:
+ return -EINVAL;
+}
+
static void
otx_ep_set_tx_func(struct rte_eth_dev *eth_dev)
{
@@ -741,6 +778,12 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
+ /* Parse devargs string */
+ if (otx_ethdev_parse_devargs(eth_dev->device->devargs, otx_epvf)) {
+ otx_ep_err("Failed to parse devargs\n");
+ return -EINVAL;
+ }
+
rte_eth_copy_pci_info(eth_dev, pdev);
otx_epvf->eth_dev = eth_dev;
otx_epvf->port_id = eth_dev->data->port_id;
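One caveat in the parser above: the return value of rte_kvargs_process()
is ignored, so a malformed value can fail silently. A stricter variant
would look like the sketch below (same kvargs API; parse_flag and
OTX_ISM_ENABLE are the ones from this patch, and this is not the
committed code):

    static int
    otx_ethdev_parse_devargs_strict(struct rte_devargs *devargs,
                                    struct otx_ep_device *otx_epvf)
    {
            struct rte_kvargs *kvlist;
            uint8_t ism_enable = 0;
            int ret;

            otx_epvf->ism_ena = 0;
            if (devargs == NULL)
                    return 0;

            kvlist = rte_kvargs_parse(devargs->args, NULL);
            if (kvlist == NULL)
                    return -EINVAL;

            /* Propagate a callback failure instead of dropping it. */
            ret = rte_kvargs_process(kvlist, OTX_ISM_ENABLE, &parse_flag,
                                     &ism_enable);
            rte_kvargs_free(kvlist);
            if (ret < 0)
                    return -EINVAL;

            otx_epvf->ism_ena = !!ism_enable;
            return 0;
    }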
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index aea148ee4a..59144e0f84 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -11,6 +11,7 @@
#include <rte_net.h>
#include <ethdev_pci.h>
+#include "cnxk_ep_rx.h"
#include "otx_ep_common.h"
#include "otx_ep_vf.h"
#include "otx_ep_rxtx.h"
@@ -159,6 +160,7 @@ otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs,
otx_ep->io_qmask.iq64B |= (1ull << iq_no);
iq->iqcmd_64B = (conf->iq.instr_type == 64);
+ iq->ism_ena = otx_ep->ism_ena;
/* Set up IQ registers */
ret = otx_ep->fn_list.setup_iq_regs(otx_ep, iq_no);
@@ -367,6 +369,7 @@ otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no,
droq->refill_threshold = c_refill_threshold;
droq->rearm_data = otx_ep_set_rearm_data(otx_ep);
+ droq->ism_ena = otx_ep->ism_ena;
/* Set up OQ registers */
ret = otx_ep->fn_list.setup_oq_regs(otx_ep, q_no);
@@ -460,8 +463,8 @@ otx_vf_update_read_index(struct otx_ep_instr_queue *iq)
* number of PCIe writes.
*/
val = *iq->inst_cnt_ism;
- iq->inst_cnt += val - iq->inst_cnt_ism_prev;
- iq->inst_cnt_ism_prev = val;
+ iq->inst_cnt += val - iq->inst_cnt_prev;
+ iq->inst_cnt_prev = val;
if (val > (uint32_t)(1 << 31)) {
/*
@@ -477,7 +480,7 @@ otx_vf_update_read_index(struct otx_ep_instr_queue *iq)
rte_mb();
}
- iq->inst_cnt_ism_prev = 0;
+ iq->inst_cnt_prev = 0;
}
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
@@ -856,8 +859,8 @@ otx_ep_check_droq_pkts(struct otx_ep_droq *droq)
* number of PCIe writes.
*/
val = *droq->pkts_sent_ism;
- new_pkts = val - droq->pkts_sent_ism_prev;
- droq->pkts_sent_ism_prev = val;
+ new_pkts = val - droq->pkts_sent_prev;
+ droq->pkts_sent_prev = val;
if (val > (uint32_t)(1 << 31)) {
/*
@@ -873,7 +876,7 @@ otx_ep_check_droq_pkts(struct otx_ep_droq *droq)
rte_mb();
}
- droq->pkts_sent_ism_prev = 0;
+ droq->pkts_sent_prev = 0;
}
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
droq->pkts_pending += new_pkts;
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.h b/drivers/net/octeon_ep/otx_ep_rxtx.h
index f5bc807dc0..6b3abe21b1 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.h
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.h
@@ -24,6 +24,8 @@
#define DROQ_REFILL_THRESHOLD 64
#define OTX2_SDP_REQUEST_ISM (0x1ULL << 63)
+typedef uint32_t (*otx_ep_check_pkt_count_t)(void *queue);
+
static inline uint32_t
otx_ep_incr_index(uint32_t index, uint32_t count, uint32_t max)
{
--
2.25.1
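A note on the design: the otx_ep_check_pkt_count_t typedef added in
otx_ep_rxtx.h lets the Rx and Tx hot paths pick a poller through a
two-entry const table indexed by the ism_ena flag instead of branching.
The pattern in isolation (illustrative stub names, not driver code):

    #include <stdint.h>

    typedef uint32_t (*check_pkt_count_t)(void *queue);

    static uint32_t check_reg(void *q) { (void)q; return 0; } /* stub */
    static uint32_t check_ism(void *q) { (void)q; return 0; } /* stub */

    /* ism_ena is always 0 or 1, so it indexes the table directly. */
    static inline uint32_t
    poll_queue(void *q, uint8_t ism_ena)
    {
            static const check_pkt_count_t tbl[2] = { check_reg, check_ism };

            return tbl[ism_ena](q);
    }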
* Re: [PATCH 1/1] net/octeon_ep: use devarg to enable ISM accesses
2024-02-23 15:39 [PATCH 1/1] net/octeon_ep: use devarg to enable ISM accesses Vamsi Attunuru
@ 2024-02-25 15:39 ` Jerin Jacob
2024-02-26 8:59 ` [PATCH v2 " Vamsi Attunuru
From: Jerin Jacob @ 2024-02-25 15:39 UTC (permalink / raw)
To: Vamsi Attunuru; +Cc: dev, jerinj
On Fri, Feb 23, 2024 at 9:35 PM Vamsi Attunuru <vattunuru@marvell.com> wrote:
>
> Adds a devarg option to enable/disable ISM memory accesses
> for reading packet count details. This option is disabled
> by default, as ISM memory accesses affect the throughput of
> larger packets.
>
> Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
> ---
> doc/guides/nics/octeon_ep.rst | 12 ++++++++
> drivers/net/octeon_ep/cnxk_ep_rx.h | 42 +++++++++++++++++++++-----
> drivers/net/octeon_ep/cnxk_ep_tx.c | 42 ++++++++++++++++++++++----
> drivers/net/octeon_ep/cnxk_ep_vf.c | 4 +--
> drivers/net/octeon_ep/otx2_ep_vf.c | 4 +--
> drivers/net/octeon_ep/otx_ep_common.h | 14 +++++++--
> drivers/net/octeon_ep/otx_ep_ethdev.c | 43 +++++++++++++++++++++++++++
> drivers/net/octeon_ep/otx_ep_rxtx.c | 15 ++++++----
> drivers/net/octeon_ep/otx_ep_rxtx.h | 2 ++
> 9 files changed, 153 insertions(+), 25 deletions(-)
>
> diff --git a/doc/guides/nics/octeon_ep.rst b/doc/guides/nics/octeon_ep.rst
> index b5040aeee2..befa0a4097 100644
> --- a/doc/guides/nics/octeon_ep.rst
> +++ b/doc/guides/nics/octeon_ep.rst
> @@ -11,6 +11,18 @@ and **Cavium OCTEON** families of adapters in SR-IOV context.
> More information can be found at `Marvell Official Website
> <https://www.marvell.com/content/dam/marvell/en/public-collateral/embedded-processors/marvell-liquidio-III-solutions-brief.pdf>`_.
>
> +Runtime Config Options
> +----------------------
> +
> +- ``Rx&Tx ISM memory accesses enable`` (default ``0``)
> +
> + PMD supports 2 modes for checking Rx & Tx packet count: the PMD may read the packet count
2 → two
> + directly from hardware registers, or it may read it from ISM memory. The mode is selected
> + at runtime using the ``ism_enable`` ``devargs`` parameter.
Furthermore, tell why someone needs to choose one vs. the other.
> +
> + For example::
> +
> + -a 0002:02:00.0,ism_enable=1
>
1) Update release notes with the new PMD feature
2) Missing updates to RTE_PMD_REGISTER_PARAM_STRING for devargs
* [PATCH v2 1/1] net/octeon_ep: use devarg to enable ISM accesses
2024-02-23 15:39 [PATCH 1/1] net/octeon_ep: use devarg to enable ISM accesses Vamsi Attunuru
2024-02-25 15:39 ` Jerin Jacob
@ 2024-02-26 8:59 ` Vamsi Attunuru
2024-02-29 16:32 ` Jerin Jacob
From: Vamsi Attunuru @ 2024-02-26 8:59 UTC (permalink / raw)
To: dev; +Cc: jerinj, vattunuru
Adds a devarg option to enable/disable ISM memory accesses
for reading packet count details. This option is disabled
by default, as ISM memory accesses affect the throughput of
larger packets.
Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
---
V2 changes:
- Updated release notes and documentation
- Added missing devargs param string
doc/guides/nics/octeon_ep.rst | 14 ++++++++
doc/guides/rel_notes/release_24_03.rst | 2 ++
drivers/net/octeon_ep/cnxk_ep_rx.h | 42 ++++++++++++++++++++----
drivers/net/octeon_ep/cnxk_ep_tx.c | 42 ++++++++++++++++++++----
drivers/net/octeon_ep/cnxk_ep_vf.c | 4 +--
drivers/net/octeon_ep/otx2_ep_vf.c | 4 +--
drivers/net/octeon_ep/otx_ep_common.h | 14 ++++++--
drivers/net/octeon_ep/otx_ep_ethdev.c | 45 ++++++++++++++++++++++++++
drivers/net/octeon_ep/otx_ep_rxtx.c | 15 +++++----
drivers/net/octeon_ep/otx_ep_rxtx.h | 2 ++
10 files changed, 159 insertions(+), 25 deletions(-)
diff --git a/doc/guides/nics/octeon_ep.rst b/doc/guides/nics/octeon_ep.rst
index b5040aeee2..db2ff0e7c1 100644
--- a/doc/guides/nics/octeon_ep.rst
+++ b/doc/guides/nics/octeon_ep.rst
@@ -11,6 +11,20 @@ and **Cavium OCTEON** families of adapters in SR-IOV context.
More information can be found at `Marvell Official Website
<https://www.marvell.com/content/dam/marvell/en/public-collateral/embedded-processors/marvell-liquidio-III-solutions-brief.pdf>`_.
+Runtime Config Options
+----------------------
+
+- ``Rx&Tx ISM memory accesses enable`` (default ``0``)
+
+ PMD supports two modes for checking the Rx & Tx packet count: the PMD may read the count
+ directly from hardware registers, or it may read it from ISM memory. The mode is selected at
+ runtime using the ``ism_enable`` ``devargs`` parameter. Performance is higher for larger
+ packets with the default value (``ism_enable=0``). Use this runtime option to enable ISM
+ memory accesses for better performance with smaller packets.
+
+ For example::
+
+ -a 0002:02:00.0,ism_enable=1
Prerequisites
-------------
diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 4b3e26ebf6..74ec43ca64 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -121,6 +121,8 @@ New Features
* Added optimized SSE Rx routines.
* Added optimized AVX2 Rx routines.
* Added optimized NEON Rx routines.
+ * Added devarg to enable/disable ISM memory accesses, which gives better performance
+ for smaller packet sizes when enabled.
* **Updated NVIDIA mlx5 driver.**
diff --git a/drivers/net/octeon_ep/cnxk_ep_rx.h b/drivers/net/octeon_ep/cnxk_ep_rx.h
index 61263e651e..ecf95cd961 100644
--- a/drivers/net/octeon_ep/cnxk_ep_rx.h
+++ b/drivers/net/octeon_ep/cnxk_ep_rx.h
@@ -88,8 +88,9 @@ cnxk_ep_rx_refill(struct otx_ep_droq *droq)
}
static inline uint32_t
-cnxk_ep_check_rx_pkts(struct otx_ep_droq *droq)
+cnxk_ep_check_rx_ism_mem(void *rx_queue)
{
+ struct otx_ep_droq *droq = (struct otx_ep_droq *)rx_queue;
uint32_t new_pkts;
uint32_t val;
@@ -98,8 +99,9 @@ cnxk_ep_check_rx_pkts(struct otx_ep_droq *droq)
* number of PCIe writes.
*/
val = __atomic_load_n(droq->pkts_sent_ism, __ATOMIC_RELAXED);
- new_pkts = val - droq->pkts_sent_ism_prev;
- droq->pkts_sent_ism_prev = val;
+
+ new_pkts = val - droq->pkts_sent_prev;
+ droq->pkts_sent_prev = val;
if (val > RTE_BIT32(31)) {
/* Only subtract the packet count in the HW counter
@@ -113,11 +115,34 @@ cnxk_ep_check_rx_pkts(struct otx_ep_droq *droq)
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
rte_mb();
}
-
- droq->pkts_sent_ism_prev = 0;
+ droq->pkts_sent_prev = 0;
}
+
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
- droq->pkts_pending += new_pkts;
+
+ return new_pkts;
+}
+
+static inline uint32_t
+cnxk_ep_check_rx_pkt_reg(void *rx_queue)
+{
+ struct otx_ep_droq *droq = (struct otx_ep_droq *)rx_queue;
+ uint32_t new_pkts;
+ uint32_t val;
+
+ val = rte_read32(droq->pkts_sent_reg);
+
+ new_pkts = val - droq->pkts_sent_prev;
+ droq->pkts_sent_prev = val;
+
+ if (val > RTE_BIT32(31)) {
+ /* Only subtract the packet count in the HW counter
+ * when count above halfway to saturation.
+ */
+ rte_write64((uint64_t)val, droq->pkts_sent_reg);
+ rte_mb();
+ droq->pkts_sent_prev = 0;
+ }
return new_pkts;
}
@@ -125,8 +150,11 @@ cnxk_ep_check_rx_pkts(struct otx_ep_droq *droq)
static inline int16_t __rte_hot
cnxk_ep_rx_pkts_to_process(struct otx_ep_droq *droq, uint16_t nb_pkts)
{
+ const otx_ep_check_pkt_count_t cnxk_rx_pkt_count[2] = { cnxk_ep_check_rx_pkt_reg,
+ cnxk_ep_check_rx_ism_mem};
+
if (droq->pkts_pending < nb_pkts)
- cnxk_ep_check_rx_pkts(droq);
+ droq->pkts_pending += cnxk_rx_pkt_count[droq->ism_ena](droq);
return RTE_MIN(nb_pkts, droq->pkts_pending);
}
diff --git a/drivers/net/octeon_ep/cnxk_ep_tx.c b/drivers/net/octeon_ep/cnxk_ep_tx.c
index 9f11a2f317..98c0a861c3 100644
--- a/drivers/net/octeon_ep/cnxk_ep_tx.c
+++ b/drivers/net/octeon_ep/cnxk_ep_tx.c
@@ -5,9 +5,10 @@
#include "cnxk_ep_vf.h"
#include "otx_ep_rxtx.h"
-static uint32_t
-cnxk_vf_update_read_index(struct otx_ep_instr_queue *iq)
+static inline uint32_t
+cnxk_ep_check_tx_ism_mem(void *tx_queue)
{
+ struct otx_ep_instr_queue *iq = (struct otx_ep_instr_queue *)tx_queue;
uint32_t val;
/* Batch subtractions from the HW counter to reduce PCIe traffic
@@ -15,8 +16,8 @@ cnxk_vf_update_read_index(struct otx_ep_instr_queue *iq)
* number of PCIe writes.
*/
val = __atomic_load_n(iq->inst_cnt_ism, __ATOMIC_RELAXED);
- iq->inst_cnt += val - iq->inst_cnt_ism_prev;
- iq->inst_cnt_ism_prev = val;
+ iq->inst_cnt += val - iq->inst_cnt_prev;
+ iq->inst_cnt_prev = val;
if (val > (uint32_t)(1 << 31)) {
/* Only subtract the packet count in the HW counter
@@ -31,7 +32,7 @@ cnxk_vf_update_read_index(struct otx_ep_instr_queue *iq)
rte_mb();
}
- iq->inst_cnt_ism_prev = 0;
+ iq->inst_cnt_prev = 0;
}
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
@@ -41,13 +42,42 @@ cnxk_vf_update_read_index(struct otx_ep_instr_queue *iq)
return iq->inst_cnt & (iq->nb_desc - 1);
}
+static inline uint32_t
+cnxk_ep_check_tx_pkt_reg(void *tx_queue)
+{
+ struct otx_ep_instr_queue *iq = (struct otx_ep_instr_queue *)tx_queue;
+ uint32_t val;
+
+ val = rte_read32(iq->inst_cnt_reg);
+ iq->inst_cnt += val - iq->inst_cnt_prev;
+ iq->inst_cnt_prev = val;
+
+ if (val > (uint32_t)(1 << 31)) {
+ /* Only subtract the packet count in the HW counter
+ * when count above halfway to saturation.
+ */
+ rte_write64((uint64_t)val, iq->inst_cnt_reg);
+ rte_mb();
+
+ iq->inst_cnt_prev = 0;
+ }
+
+ /* Modulo of the new index with the IQ size will give us
+ * the new index.
+ */
+ return iq->inst_cnt & (iq->nb_desc - 1);
+}
+
static inline void
cnxk_ep_flush_iq(struct otx_ep_instr_queue *iq)
{
+ const otx_ep_check_pkt_count_t cnxk_tx_pkt_count[2] = { cnxk_ep_check_tx_pkt_reg,
+ cnxk_ep_check_tx_ism_mem};
+
uint32_t instr_processed = 0;
uint32_t cnt = 0;
- iq->otx_read_index = cnxk_vf_update_read_index(iq);
+ iq->otx_read_index = cnxk_tx_pkt_count[iq->ism_ena](iq);
if (unlikely(iq->flush_index == iq->otx_read_index))
return;
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index ef275703c3..39f357ee81 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -155,7 +155,7 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%" PRIX64, iq_no,
(void *)iq->inst_cnt_ism, ism_addr);
*iq->inst_cnt_ism = 0;
- iq->inst_cnt_ism_prev = 0;
+ iq->inst_cnt_prev = 0;
iq->partial_ih = ((uint64_t)otx_ep->pkind) << 36;
return 0;
@@ -240,7 +240,7 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
otx_ep_err("SDP_R[%d] OQ ISM virt: %p dma: 0x%" PRIX64,
oq_no, (void *)droq->pkts_sent_ism, ism_addr);
*droq->pkts_sent_ism = 0;
- droq->pkts_sent_ism_prev = 0;
+ droq->pkts_sent_prev = 0;
loop = OTX_EP_BUSY_LOOP_COUNT;
while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 7f4edf8dcf..25e0e5a500 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -306,7 +306,7 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
(void *)iq->inst_cnt_ism,
(unsigned int)ism_addr);
*iq->inst_cnt_ism = 0;
- iq->inst_cnt_ism_prev = 0;
+ iq->inst_cnt_prev = 0;
iq->partial_ih = ((uint64_t)otx_ep->pkind) << 36;
return 0;
@@ -392,7 +392,7 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
(void *)droq->pkts_sent_ism,
(unsigned int)ism_addr);
*droq->pkts_sent_ism = 0;
- droq->pkts_sent_ism_prev = 0;
+ droq->pkts_sent_prev = 0;
loop = SDP_VF_BUSY_LOOP_COUNT;
while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index d64b04d2c2..7776940e1d 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -9,6 +9,7 @@
#include <unistd.h>
#include <assert.h>
#include <rte_eal.h>
+#include <rte_kvargs.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_io.h>
@@ -223,7 +224,7 @@ struct otx_ep_instr_queue {
uint8_t *base_addr;
/* track inst count locally to consolidate HW counter updates */
- uint32_t inst_cnt_ism_prev;
+ uint32_t inst_cnt_prev;
/* Input ring index, where the driver should write the next packet */
uint32_t host_write_index;
@@ -261,6 +262,9 @@ struct otx_ep_instr_queue {
/* Number of descriptors in this ring. */
uint32_t nb_desc;
+ /* Use ISM memory */
+ uint8_t ism_ena;
+
/* Size of the descriptor. */
uint8_t desc_size;
@@ -405,9 +409,12 @@ struct otx_ep_droq {
*/
void *pkts_sent_reg;
+ /* Use ISM memory */
+ uint8_t ism_ena;
+
/* Pointer to host memory copy of output packet count, set by ISM */
uint32_t *pkts_sent_ism;
- uint32_t pkts_sent_ism_prev;
+ uint32_t pkts_sent_prev;
/* Statistics for this DROQ. */
struct otx_ep_droq_stats stats;
@@ -565,6 +572,9 @@ struct otx_ep_device {
/* Generation */
uint32_t chip_gen;
+
+ /* Use ISM memory */
+ uint8_t ism_ena;
};
int otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no,
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 8daa7d225c..46211361a0 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -15,6 +15,8 @@
#define OTX_EP_DEV(_eth_dev) \
((struct otx_ep_device *)(_eth_dev)->data->dev_private)
+#define OTX_ISM_ENABLE "ism_enable"
+
static const struct rte_eth_desc_lim otx_ep_rx_desc_lim = {
.nb_max = OTX_EP_MAX_OQ_DESCRIPTORS,
.nb_min = OTX_EP_MIN_OQ_DESCRIPTORS,
@@ -27,6 +29,41 @@ static const struct rte_eth_desc_lim otx_ep_tx_desc_lim = {
.nb_align = OTX_EP_TXD_ALIGN,
};
+static int
+parse_flag(const char *key, const char *value, void *extra_args)
+{
+ RTE_SET_USED(key);
+
+ *(uint8_t *)extra_args = atoi(value);
+
+ return 0;
+}
+
+static int
+otx_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx_ep_device *otx_epvf)
+{
+ struct rte_kvargs *kvlist;
+ uint8_t ism_enable = 0;
+
+ if (devargs == NULL)
+ goto null_devargs;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ goto exit;
+
+ rte_kvargs_process(kvlist, OTX_ISM_ENABLE, &parse_flag, &ism_enable);
+ rte_kvargs_free(kvlist);
+
+null_devargs:
+ otx_epvf->ism_ena = !!ism_enable;
+
+ return 0;
+
+exit:
+ return -EINVAL;
+}
+
static void
otx_ep_set_tx_func(struct rte_eth_dev *eth_dev)
{
@@ -741,6 +778,12 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
+ /* Parse devargs string */
+ if (otx_ethdev_parse_devargs(eth_dev->device->devargs, otx_epvf)) {
+ otx_ep_err("Failed to parse devargs\n");
+ return -EINVAL;
+ }
+
rte_eth_copy_pci_info(eth_dev, pdev);
otx_epvf->eth_dev = eth_dev;
otx_epvf->port_id = eth_dev->data->port_id;
@@ -837,3 +880,5 @@ RTE_PMD_REGISTER_PCI(net_otx_ep, rte_otx_ep_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_otx_ep, pci_id_otx_ep_map);
RTE_PMD_REGISTER_KMOD_DEP(net_otx_ep, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER_DEFAULT(otx_net_ep_logtype, NOTICE);
+RTE_PMD_REGISTER_PARAM_STRING(net_otx_ep,
+ OTX_ISM_ENABLE "=<0|1>");
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index aea148ee4a..59144e0f84 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -11,6 +11,7 @@
#include <rte_net.h>
#include <ethdev_pci.h>
+#include "cnxk_ep_rx.h"
#include "otx_ep_common.h"
#include "otx_ep_vf.h"
#include "otx_ep_rxtx.h"
@@ -159,6 +160,7 @@ otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs,
otx_ep->io_qmask.iq64B |= (1ull << iq_no);
iq->iqcmd_64B = (conf->iq.instr_type == 64);
+ iq->ism_ena = otx_ep->ism_ena;
/* Set up IQ registers */
ret = otx_ep->fn_list.setup_iq_regs(otx_ep, iq_no);
@@ -367,6 +369,7 @@ otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no,
droq->refill_threshold = c_refill_threshold;
droq->rearm_data = otx_ep_set_rearm_data(otx_ep);
+ droq->ism_ena = otx_ep->ism_ena;
/* Set up OQ registers */
ret = otx_ep->fn_list.setup_oq_regs(otx_ep, q_no);
@@ -460,8 +463,8 @@ otx_vf_update_read_index(struct otx_ep_instr_queue *iq)
* number of PCIe writes.
*/
val = *iq->inst_cnt_ism;
- iq->inst_cnt += val - iq->inst_cnt_ism_prev;
- iq->inst_cnt_ism_prev = val;
+ iq->inst_cnt += val - iq->inst_cnt_prev;
+ iq->inst_cnt_prev = val;
if (val > (uint32_t)(1 << 31)) {
/*
@@ -477,7 +480,7 @@ otx_vf_update_read_index(struct otx_ep_instr_queue *iq)
rte_mb();
}
- iq->inst_cnt_ism_prev = 0;
+ iq->inst_cnt_prev = 0;
}
rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
@@ -856,8 +859,8 @@ otx_ep_check_droq_pkts(struct otx_ep_droq *droq)
* number of PCIe writes.
*/
val = *droq->pkts_sent_ism;
- new_pkts = val - droq->pkts_sent_ism_prev;
- droq->pkts_sent_ism_prev = val;
+ new_pkts = val - droq->pkts_sent_prev;
+ droq->pkts_sent_prev = val;
if (val > (uint32_t)(1 << 31)) {
/*
@@ -873,7 +876,7 @@ otx_ep_check_droq_pkts(struct otx_ep_droq *droq)
rte_mb();
}
- droq->pkts_sent_ism_prev = 0;
+ droq->pkts_sent_prev = 0;
}
rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
droq->pkts_pending += new_pkts;
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.h b/drivers/net/octeon_ep/otx_ep_rxtx.h
index f5bc807dc0..6b3abe21b1 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.h
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.h
@@ -24,6 +24,8 @@
#define DROQ_REFILL_THRESHOLD 64
#define OTX2_SDP_REQUEST_ISM (0x1ULL << 63)
+typedef uint32_t (*otx_ep_check_pkt_count_t)(void *queue);
+
static inline uint32_t
otx_ep_incr_index(uint32_t index, uint32_t count, uint32_t max)
{
--
2.25.1
* Re: [PATCH v2 1/1] net/octeon_ep: use devarg to enable ISM accesses
2024-02-26 8:59 ` [PATCH v2 " Vamsi Attunuru
@ 2024-02-29 16:32 ` Jerin Jacob
From: Jerin Jacob @ 2024-02-29 16:32 UTC (permalink / raw)
To: Vamsi Attunuru; +Cc: dev, jerinj
On Mon, Feb 26, 2024 at 2:55 PM Vamsi Attunuru <vattunuru@marvell.com> wrote:
>
> Adds a devarg option to enable/disable ISM memory accesses
> for reading packet count details. This option is disabled
> by default, as ISM memory accesses affect the throughput of
> larger packets.
>
> Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
Updated the git commit as follows and applied to
dpdk-next-net-mrvl/for-main. Thanks
net/octeon_ep: enable ISM accesses via devarg
Adds a devarg option to enable/disable ISM memory accesses
for reading packet count details. This option is disabled
by default, as ISM memory accesses affect the throughput of
larger packets.
Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
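With the patch applied, enabling the mode from an application is just a
matter of the allow-list string; a minimal EAL sketch (the PCI address
is the example one from the docs):

    #include <rte_eal.h>

    int
    main(int argc, char **argv)
    {
            char *eal_args[] = {
                    argv[0],
                    "-a", "0002:02:00.0,ism_enable=1", /* enable ISM polling */
            };

            (void)argc;
            if (rte_eal_init(3, eal_args) < 0)
                    return -1;

            /* ... ethdev configure/start as usual ... */
            return rte_eal_cleanup();
    }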