From: Sathesh Edara <sedara@marvell.com>
To: <sburla@marvell.com>, <jerinj@marvell.com>, <sedara@marvell.com>,
"Radha Mohan Chintakuntla" <radhac@marvell.com>,
Veerasenareddy Burru <vburru@marvell.com>
Cc: <dev@dpdk.org>
Subject: [PATCH v3 06/11] net/octeon_ep: support ISM
Date: Mon, 24 Apr 2023 05:55:06 -0700
Message-ID: <20230424125512.40013-7-sedara@marvell.com>
In-Reply-To: <20230424125512.40013-1-sedara@marvell.com>
Add ISM (interrupt status messages) support. Reserve a per-device DMA
buffer and program each IQ/OQ so that the SDP hardware posts its packet
counter to a dedicated cache-line slot in that buffer. The Rx/Tx
datapath then reads completion counts from local memory and batches the
counter write-backs, reducing PCIe round trips on the hot path.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
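Notes: below is a minimal, self-contained sketch of how a queue's ISM address is
programmed at setup time. setup_iq_ism() and its parameters are illustrative
stand-ins rather than driver APIs; the constants mirror the CNXK_EP_ISM_EN,
CNXK_EP_ISM_MSIX_DIS and CNXK_EP_IQ_ISM_OFFSET() macros added by this patch.

#include <stdint.h>
#include <rte_common.h>
#include <rte_io.h>

/* One cache line per IOQ pair in the shared ISM DMA buffer: the OQ count
 * lives at offset 0, the IQ count at offset 4 (see the *_ISM_OFFSET macros).
 */
#define ISM_EN			0x1	/* enable ISM posting for the ring */
#define ISM_MSIX_DIS		0x2	/* post the count without raising MSI-X */
#define IQ_ISM_OFFSET(q)	(RTE_CACHE_LINE_SIZE * (q) + 4)

/*
 * Hypothetical helper: point ring 'q' at its counter slot inside the ISM
 * buffer (IOVA side for the hardware, virtual side for the driver).
 */
static uint32_t *
setup_iq_ism(uint8_t *hw_addr, uint64_t in_cnts_ism_reg,
	     void *ism_va, uint64_t ism_iova, uint32_t q)
{
	/* Low bits carry flags; the buffer is cache-line aligned so they are free. */
	uint64_t ism_addr = (ism_iova | ISM_EN | ISM_MSIX_DIS) + IQ_ISM_OFFSET(q);
	uint32_t *inst_cnt = (uint32_t *)((uint8_t *)ism_va + IQ_ISM_OFFSET(q));

	rte_write64(ism_addr, hw_addr + in_cnts_ism_reg);
	*inst_cnt = 0;	/* start from a clean count */

	return inst_cnt;
}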
drivers/net/octeon_ep/cnxk_ep_vf.c | 35 +++++++++++++++--
drivers/net/octeon_ep/cnxk_ep_vf.h | 12 ++++++
drivers/net/octeon_ep/otx2_ep_vf.c | 45 ++++++++++++++++++---
drivers/net/octeon_ep/otx2_ep_vf.h | 14 +++++++
drivers/net/octeon_ep/otx_ep_common.h | 16 ++++++++
drivers/net/octeon_ep/otx_ep_ethdev.c | 36 +++++++++++++++++
drivers/net/octeon_ep/otx_ep_rxtx.c | 56 +++++++++++++++++++++------
7 files changed, 194 insertions(+), 20 deletions(-)
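On the datapath side, the driver consumes the ISM counters as in the sketch
below; struct ioq_cnt and ioq_new_counts() are simplified stand-ins for the
driver's queue structures and for otx_vf_update_read_index() /
otx_ep_check_droq_pkts(), assuming the same half-range write-back threshold
used in this patch.

#include <stdint.h>
#include <rte_io.h>

#define SDP_REQUEST_ISM	(1ULL << 63)	/* ask HW to refresh the ISM count */

struct ioq_cnt {			/* simplified per-queue state */
	uint32_t *ism;			/* host memory slot written by SDP ISM */
	uint32_t prev;			/* last ISM value already consumed */
	void *cnt_reg;			/* mapped IQ/OQ count CSR */
};

/* Return the number of new completions since the previous call. */
static uint32_t
ioq_new_counts(struct ioq_cnt *q)
{
	uint32_t val = *q->ism;		/* plain memory read, no PCIe round trip */
	uint32_t new_cnts = val - q->prev;

	q->prev = val;

	/*
	 * Fold the accumulated count back into the HW counter only once it
	 * passes half of the 32-bit range, so most calls skip that PCIe write.
	 */
	if (val > (uint32_t)(1U << 31)) {
		rte_write32(val, q->cnt_reg);
		*q->ism = 0;
		q->prev = 0;
	}

	/* Ask the SDP to post the next counter snapshot into host memory. */
	rte_write64(SDP_REQUEST_ISM, q->cnt_reg);

	return new_cnts;
}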
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index 1a92887109..a437ae68cb 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -2,11 +2,12 @@
* Copyright(C) 2022 Marvell.
*/
+#include <inttypes.h>
#include <errno.h>
#include <rte_common.h>
#include <rte_cycles.h>
-
+#include <rte_memzone.h>
#include "cnxk_ep_vf.h"
static void
@@ -85,6 +86,7 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
int loop = OTX_EP_BUSY_LOOP_COUNT;
volatile uint64_t reg_val = 0ull;
+ uint64_t ism_addr;
reg_val = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_IN_CONTROL(iq_no));
@@ -132,6 +134,19 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + CNXK_EP_R_IN_INT_LEVELS(iq_no));
+ /* Set up IQ ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | CNXK_EP_ISM_EN
+ | CNXK_EP_ISM_MSIX_DIS)
+ + CNXK_EP_IQ_ISM_OFFSET(iq_no);
+ rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ CNXK_EP_R_IN_CNTS_ISM(iq_no));
+ iq->inst_cnt_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + CNXK_EP_IQ_ISM_OFFSET(iq_no));
+ otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%" PRIX64, iq_no,
+ (void *)iq->inst_cnt_ism, ism_addr);
+ *iq->inst_cnt_ism = 0;
+ iq->inst_cnt_ism_prev = 0;
return 0;
}
@@ -142,6 +157,7 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
uint64_t oq_ctl = 0ull;
int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
+ uint64_t ism_addr;
/* Wait on IDLE to set to 1, supposed to configure BADDR
* as long as IDLE is 0
@@ -201,9 +217,22 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
- loop = OTX_EP_BUSY_LOOP_COUNT;
+ /* Set up ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | CNXK_EP_ISM_EN
+ | CNXK_EP_ISM_MSIX_DIS)
+ + CNXK_EP_OQ_ISM_OFFSET(oq_no);
+ rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ CNXK_EP_R_OUT_CNTS_ISM(oq_no));
+ droq->pkts_sent_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + CNXK_EP_OQ_ISM_OFFSET(oq_no));
+ otx_ep_err("SDP_R[%d] OQ ISM virt: %p dma: 0x%" PRIX64,
+ oq_no, (void *)droq->pkts_sent_ism, ism_addr);
+ *droq->pkts_sent_ism = 0;
+ droq->pkts_sent_ism_prev = 0;
- while (((rte_read32(droq->pkts_sent_reg)) != 0ull)) {
+ loop = OTX_EP_BUSY_LOOP_COUNT;
+ while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
reg_val = rte_read32(droq->pkts_sent_reg);
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
rte_delay_ms(1);
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.h b/drivers/net/octeon_ep/cnxk_ep_vf.h
index aaa5774552..072b38ea15 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.h
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.h
@@ -27,6 +27,7 @@
#define CNXK_EP_R_IN_INT_LEVELS_START 0x10060
#define CNXK_EP_R_IN_PKT_CNT_START 0x10080
#define CNXK_EP_R_IN_BYTE_CNT_START 0x10090
+#define CNXK_EP_R_IN_CNTS_ISM_START 0x10520
#define CNXK_EP_R_IN_CONTROL(ring) \
(CNXK_EP_R_IN_CONTROL_START + ((ring) * CNXK_EP_RING_OFFSET))
@@ -55,6 +56,8 @@
#define CNXK_EP_R_IN_BYTE_CNT(ring) \
(CNXK_EP_R_IN_BYTE_CNT_START + ((ring) * CNXK_EP_RING_OFFSET))
+#define CNXK_EP_R_IN_CNTS_ISM(ring) \
+ (CNXK_EP_R_IN_CNTS_ISM_START + ((ring) * CNXK_EP_RING_OFFSET))
/** Rings per Virtual Function **/
#define CNXK_EP_R_IN_CTL_RPVF_MASK (0xF)
@@ -87,6 +90,7 @@
#define CNXK_EP_R_OUT_ENABLE_START 0x10170
#define CNXK_EP_R_OUT_PKT_CNT_START 0x10180
#define CNXK_EP_R_OUT_BYTE_CNT_START 0x10190
+#define CNXK_EP_R_OUT_CNTS_ISM_START 0x10510
#define CNXK_EP_R_OUT_CNTS(ring) \
(CNXK_EP_R_OUT_CNTS_START + ((ring) * CNXK_EP_RING_OFFSET))
@@ -118,6 +122,9 @@
#define CNXK_EP_R_OUT_BYTE_CNT(ring) \
(CNXK_EP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_EP_RING_OFFSET))
+#define CNXK_EP_R_OUT_CNTS_ISM(ring) \
+ (CNXK_EP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_EP_RING_OFFSET))
+
/*------------------ R_OUT Masks ----------------*/
#define CNXK_EP_R_OUT_INT_LEVELS_BMODE (1ULL << 63)
#define CNXK_EP_R_OUT_INT_LEVELS_TIMET (32)
@@ -161,4 +168,9 @@ struct cnxk_ep_instr_64B {
uint64_t exhdr[4];
};
+#define CNXK_EP_IQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue) + 4)
+#define CNXK_EP_OQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue))
+#define CNXK_EP_ISM_EN (0x1)
+#define CNXK_EP_ISM_MSIX_DIS (0x2)
+
#endif /*_CNXK_EP_VF_H_ */
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 3e4895862b..ced3a415a5 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -6,6 +6,7 @@
#include <rte_common.h>
#include <rte_cycles.h>
+#include <rte_memzone.h>
#include "otx_ep_common.h"
#include "otx2_ep_vf.h"
@@ -236,6 +237,7 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
volatile uint64_t reg_val = 0ull;
+ uint64_t ism_addr;
int loop = SDP_VF_BUSY_LOOP_COUNT;
reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_IN_CONTROL(iq_no));
@@ -282,6 +284,22 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + SDP_VF_R_IN_INT_LEVELS(iq_no));
+
+ /* Set up IQ ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | OTX2_EP_ISM_EN
+ | OTX2_EP_ISM_MSIX_DIS)
+ + OTX2_EP_IQ_ISM_OFFSET(iq_no);
+ oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ SDP_VF_R_IN_CNTS_ISM(iq_no));
+ iq->inst_cnt_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + OTX2_EP_IQ_ISM_OFFSET(iq_no));
+ otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%x", iq_no,
+ (void *)iq->inst_cnt_ism,
+ (unsigned int)ism_addr);
+ *iq->inst_cnt_ism = 0;
+ iq->inst_cnt_ism_prev = 0;
+
return 0;
}
@@ -290,6 +308,7 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
volatile uint64_t reg_val = 0ull;
uint64_t oq_ctl = 0ull;
+ uint64_t ism_addr;
int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
@@ -351,18 +370,32 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
- loop = OTX_EP_BUSY_LOOP_COUNT;
+ /* Set up ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | OTX2_EP_ISM_EN
+ | OTX2_EP_ISM_MSIX_DIS)
+ + OTX2_EP_OQ_ISM_OFFSET(oq_no);
+ oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ SDP_VF_R_OUT_CNTS_ISM(oq_no));
+ droq->pkts_sent_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + OTX2_EP_OQ_ISM_OFFSET(oq_no));
+ otx_ep_err("SDP_R[%d] OQ ISM virt: %p, dma: 0x%x", oq_no,
+ (void *)droq->pkts_sent_ism,
+ (unsigned int)ism_addr);
+ *droq->pkts_sent_ism = 0;
+ droq->pkts_sent_ism_prev = 0;
+
+ loop = SDP_VF_BUSY_LOOP_COUNT;
while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
reg_val = rte_read32(droq->pkts_sent_reg);
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
rte_delay_ms(1);
}
-
- if (loop < 0) {
- otx_ep_err("Packets sent register value is not cleared\n");
+ if (loop < 0)
return -EIO;
- }
- otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
+ otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no,
+ rte_read32(droq->pkts_sent_reg));
+
return 0;
}
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.h b/drivers/net/octeon_ep/otx2_ep_vf.h
index 36c0b25dea..7c799475ab 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.h
+++ b/drivers/net/octeon_ep/otx2_ep_vf.h
@@ -42,6 +42,7 @@
#define SDP_VF_R_IN_INT_LEVELS_START (0x10060)
#define SDP_VF_R_IN_PKT_CNT_START (0x10080)
#define SDP_VF_R_IN_BYTE_CNT_START (0x10090)
+#define SDP_VF_R_IN_CNTS_ISM_START (0x10520)
#define SDP_VF_R_IN_CONTROL(ring) \
(SDP_VF_R_IN_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
@@ -70,6 +71,9 @@
#define SDP_VF_R_IN_BYTE_CNT(ring) \
(SDP_VF_R_IN_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+#define SDP_VF_R_IN_CNTS_ISM(ring) \
+ (SDP_VF_R_IN_CNTS_ISM_START + (SDP_VF_RING_OFFSET * (ring)))
+
/* SDP VF OQ Registers */
#define SDP_VF_R_OUT_CNTS_START (0x10100)
#define SDP_VF_R_OUT_INT_LEVELS_START (0x10110)
@@ -80,6 +84,7 @@
#define SDP_VF_R_OUT_ENABLE_START (0x10160)
#define SDP_VF_R_OUT_PKT_CNT_START (0x10180)
#define SDP_VF_R_OUT_BYTE_CNT_START (0x10190)
+#define SDP_VF_R_OUT_CNTS_ISM_START (0x10510)
#define SDP_VF_R_OUT_CONTROL(ring) \
(SDP_VF_R_OUT_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
@@ -108,6 +113,9 @@
#define SDP_VF_R_OUT_BYTE_CNT(ring) \
(SDP_VF_R_OUT_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+#define SDP_VF_R_OUT_CNTS_ISM(ring) \
+ (SDP_VF_R_OUT_CNTS_ISM_START + (SDP_VF_RING_OFFSET * (ring)))
+
/* SDP VF IQ Masks */
#define SDP_VF_R_IN_CTL_RPVF_MASK (0xF)
#define SDP_VF_R_IN_CTL_RPVF_POS (48)
@@ -143,6 +151,12 @@ struct otx2_ep_instr_64B {
uint64_t exhdr[4];
};
+#define OTX2_EP_IQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue) + 4)
+#define OTX2_EP_OQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue))
+#define OTX2_EP_ISM_EN (0x1)
+#define OTX2_EP_ISM_MSIX_DIS (0x2)
+#define OTX2_EP_MAX_RX_PKT_LEN (16384)
+
union out_int_lvl_t {
uint64_t d64;
struct {
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index a3260d5243..76528ed49d 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -185,6 +185,9 @@ struct otx_ep_instr_queue {
*/
uint32_t flush_index;
+ /* Free-running/wrapping instruction counter for IQ. */
+ uint32_t inst_cnt;
+
/* This keeps track of the instructions pending in this queue. */
uint64_t instr_pending;
@@ -211,6 +214,12 @@ struct otx_ep_instr_queue {
/* Memory zone */
const struct rte_memzone *iq_mz;
+
+ /* Location in memory updated by SDP ISM */
+ uint32_t *inst_cnt_ism;
+
+ /* track inst count locally to consolidate HW counter updates */
+ uint32_t inst_cnt_ism_prev;
};
/** Descriptor format.
@@ -355,6 +364,10 @@ struct otx_ep_droq {
const struct rte_memzone *desc_ring_mz;
const struct rte_memzone *info_mz;
+
+ /* Pointer to host memory copy of output packet count, set by ISM */
+ uint32_t *pkts_sent_ism;
+ uint32_t pkts_sent_ism_prev;
};
#define OTX_EP_DROQ_SIZE (sizeof(struct otx_ep_droq))
@@ -459,6 +472,9 @@ struct otx_ep_device {
uint64_t rx_offloads;
uint64_t tx_offloads;
+
+ /* DMA buffer for SDP ISM messages */
+ const struct rte_memzone *ism_buffer_mz;
};
int otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no,
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 5677a2d6a6..30a7a450fb 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -2,6 +2,7 @@
* Copyright(C) 2021 Marvell.
*/
+#include <inttypes.h>
#include <ethdev_pci.h>
#include "otx_ep_common.h"
@@ -90,6 +91,32 @@ otx_ep_dev_stop(struct rte_eth_dev *eth_dev)
return 0;
}
+/*
+ * We only need 2 uint32_t locations per IOQ, but separate these so
+ * each IOQ has the variables on its own cache line.
+ */
+#define OTX_EP_ISM_BUFFER_SIZE (OTX_EP_MAX_IOQS_PER_VF * RTE_CACHE_LINE_SIZE)
+static int
+otx_ep_ism_setup(struct otx_ep_device *otx_epvf)
+{
+ otx_epvf->ism_buffer_mz =
+ rte_eth_dma_zone_reserve(otx_epvf->eth_dev, "ism",
+ 0, OTX_EP_ISM_BUFFER_SIZE,
+ OTX_EP_PCI_RING_ALIGN, 0);
+
+ if (otx_epvf->ism_buffer_mz == NULL) {
+ otx_ep_err("Failed to allocate ISM buffer\n");
+ return -1;
+ }
+ /* Same DMA buffer is shared by OQ and IQ, clear it at start */
+ memset(otx_epvf->ism_buffer_mz->addr, 0, OTX_EP_ISM_BUFFER_SIZE);
+ otx_ep_dbg("ISM: virt: 0x%p, dma: 0x%" PRIX64,
+ (void *)otx_epvf->ism_buffer_mz->addr,
+ otx_epvf->ism_buffer_mz->iova);
+
+ return 0;
+}
+
static int
otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
{
@@ -110,6 +137,8 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
otx_epvf->chip_id = dev_id;
ret = otx2_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
+ if (otx_ep_ism_setup(otx_epvf))
+ ret = -EINVAL;
break;
case PCI_DEVID_CN10KA_EP_NET_VF:
case PCI_DEVID_CN10KB_EP_NET_VF:
@@ -118,6 +147,8 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
otx_epvf->chip_id = dev_id;
ret = cnxk_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
+ if (otx_ep_ism_setup(otx_epvf))
+ ret = -EINVAL;
break;
default:
otx_ep_err("Unsupported device\n");
@@ -434,6 +465,11 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
}
otx_ep_dbg("Num IQs:%d freed\n", otx_epvf->nb_tx_queues);
+ if (rte_eth_dma_zone_free(eth_dev, "ism", 0)) {
+ otx_ep_err("Failed to delete ISM buffer\n");
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index 9712e6cce6..c4153bd583 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -20,6 +20,7 @@
#define OTX_EP_INFO_SIZE 8
#define OTX_EP_FSZ_FS0 0
#define DROQ_REFILL_THRESHOLD 16
+#define OTX2_SDP_REQUEST_ISM (0x1ULL << 63)
static void
otx_ep_dmazone_free(const struct rte_memzone *mz)
@@ -412,15 +413,32 @@ otx_ep_iqreq_add(struct otx_ep_instr_queue *iq, void *buf,
static uint32_t
otx_vf_update_read_index(struct otx_ep_instr_queue *iq)
{
- uint32_t new_idx = rte_read32(iq->inst_cnt_reg);
- if (unlikely(new_idx == 0xFFFFFFFFU))
- rte_write32(new_idx, iq->inst_cnt_reg);
+ uint32_t val;
+
+ /*
+ * Batch subtractions from the HW counter to reduce PCIe traffic
+ * This adds an extra local variable, but almost halves the
+ * number of PCIe writes.
+ */
+ val = *iq->inst_cnt_ism;
+ iq->inst_cnt += val - iq->inst_cnt_ism_prev;
+ iq->inst_cnt_ism_prev = val;
+
+ if (val > (uint32_t)(1U << 31)) {
+ /*
+ * Only subtract the packet count in the HW counter
+ * when count above halfway to saturation.
+ */
+ rte_write32(val, iq->inst_cnt_reg);
+ *iq->inst_cnt_ism = 0;
+ iq->inst_cnt_ism_prev = 0;
+ }
+ rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
+
/* Modulo of the new index with the IQ size will give us
* the new index.
*/
- new_idx &= (iq->nb_desc - 1);
-
- return new_idx;
+ return iq->inst_cnt & (iq->nb_desc - 1);
}
static void
@@ -962,14 +980,30 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
static inline uint32_t
otx_ep_check_droq_pkts(struct otx_ep_droq *droq)
{
- volatile uint64_t pkt_count;
uint32_t new_pkts;
+ uint32_t val;
+
+ /*
+ * Batch subtractions from the HW counter to reduce PCIe traffic
+ * This adds an extra local variable, but almost halves the
+ * number of PCIe writes.
+ */
+ val = *droq->pkts_sent_ism;
+ new_pkts = val - droq->pkts_sent_ism_prev;
+ droq->pkts_sent_ism_prev = val;
- /* Latest available OQ packets */
- pkt_count = rte_read32(droq->pkts_sent_reg);
- rte_write32(pkt_count, droq->pkts_sent_reg);
- new_pkts = pkt_count;
+ if (val > (uint32_t)(1U << 31)) {
+ /*
+ * Only subtract the packet count in the HW counter
+ * when count above halfway to saturation.
+ */
+ rte_write32(val, droq->pkts_sent_reg);
+ *droq->pkts_sent_ism = 0;
+ droq->pkts_sent_ism_prev = 0;
+ }
+ rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
droq->pkts_pending += new_pkts;
+
return new_pkts;
}
--
2.31.1