From: Ajit Khaparde <ajit.khaparde@broadcom.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com,
Lance Richardson <lance.richardson@broadcom.com>,
stable@dpdk.org, Somnath Kotur <somnath.kotur@broadcom.com>,
Kalesh Anakkur Purayil <kalesh-anakkur.purayil@broadcom.com>
Subject: [dpdk-dev] [PATCH v2 5/9] net/bnxt: use common receive/transmit NQ ring
Date: Thu, 3 Oct 2019 20:48:59 -0700
Message-ID: <20191004034903.85233-6-ajit.khaparde@broadcom.com>
In-Reply-To: <20191004034903.85233-1-ajit.khaparde@broadcom.com>

From: Lance Richardson <lance.richardson@broadcom.com>

Thor queue scaling is currently limited by the number of NQs that
can be allocated. Fix this by using a single NQ shared by all
receive and transmit rings instead of allocating a separate NQ for
each ring.
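
As a rough illustration only, here is a minimal, standalone C sketch of
the pattern this patch moves to. The struct and function names below
(struct nq, struct ring, setup_rings) are made-up stand-ins, not the
actual bnxt data structures or HWRM calls:

/*
 * Illustrative sketch: one NQ shared by every Rx/Tx ring instead of
 * one NQ per ring, so ring count is no longer bounded by the number
 * of NQs that can be allocated.
 */
#include <stdio.h>
#include <stdlib.h>

struct nq {
	int id;			/* notification queue identifier */
};

struct ring {
	struct nq *nq;		/* NQ this ring reports completions to */
};

static struct nq *shared_nq;	/* single NQ for all Rx/Tx rings */

static int setup_rings(struct ring *rings, int nr_rings)
{
	int i;

	/* Allocate the shared NQ once, on first use. */
	if (shared_nq == NULL) {
		shared_nq = calloc(1, sizeof(*shared_nq));
		if (shared_nq == NULL)
			return -1;
	}

	/* Every ring points at the same NQ. */
	for (i = 0; i < nr_rings; i++)
		rings[i].nq = shared_nq;

	return 0;
}

int main(void)
{
	struct ring rings[8];

	if (setup_rings(rings, 8) != 0)
		return 1;
	printf("8 rings share NQ %d\n", rings[0].nq->id);
	free(shared_nq);
	return 0;
}
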
Fixes: f8168ca0e690 ("net/bnxt: support thor controller")
Cc: stable@dpdk.org
Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Kalesh Anakkur Purayil <kalesh-anakkur.purayil@broadcom.com>
---
drivers/net/bnxt/bnxt.h | 1 +
drivers/net/bnxt/bnxt_ethdev.c | 5 ++
drivers/net/bnxt/bnxt_hwrm.c | 7 +--
drivers/net/bnxt/bnxt_ring.c | 107 ++++++++++++++++++++++-----------
drivers/net/bnxt/bnxt_ring.h | 2 +
drivers/net/bnxt/bnxt_rxq.c | 4 +-
drivers/net/bnxt/bnxt_rxq.h | 1 -
drivers/net/bnxt/bnxt_rxr.c | 27 ---------
drivers/net/bnxt/bnxt_txq.c | 4 +-
drivers/net/bnxt/bnxt_txq.h | 1 -
drivers/net/bnxt/bnxt_txr.c | 25 --------
11 files changed, 84 insertions(+), 100 deletions(-)
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 5cfe5ee2c7..ad0b18dddd 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -497,6 +497,7 @@ struct bnxt {
/* Default completion ring */
struct bnxt_cp_ring_info *async_cp_ring;
+ struct bnxt_cp_ring_info *rxtx_nq_ring;
uint32_t max_ring_grps;
struct bnxt_ring_grp_info *grp_info;
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 9adcd94ff8..2845e9185a 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -223,6 +223,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
bnxt_free_rx_rings(bp);
}
bnxt_free_async_cp_ring(bp);
+ bnxt_free_rxtx_nq_ring(bp);
}
static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
@@ -253,6 +254,10 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
if (rc)
goto alloc_mem_err;
+ rc = bnxt_alloc_rxtx_nq_ring(bp);
+ if (rc)
+ goto alloc_mem_err;
+
return 0;
alloc_mem_err:
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 76ef004237..b5211aea75 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -2325,11 +2325,8 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
bp->grp_info[queue_index].ag_fw_ring_id =
INVALID_HW_RING_ID;
}
- if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
bnxt_free_cp_ring(bp, cpr);
- if (rxq->nq_ring)
- bnxt_free_nq_ring(bp, rxq->nq_ring);
- }
if (BNXT_HAS_RING_GRPS(bp))
bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
@@ -2361,8 +2358,6 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_free_cp_ring(bp, cpr);
cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
- if (txq->nq_ring)
- bnxt_free_nq_ring(bp, txq->nq_ring);
}
}
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index cf0c24c9dc..19fc45395d 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -125,7 +125,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
cp_vmem_len = RTE_ALIGN(cp_vmem_len, 128);
- int nq_vmem_len = BNXT_CHIP_THOR(bp) ?
+ int nq_vmem_len = nq_ring_info ?
RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size) : 0;
nq_vmem_len = RTE_ALIGN(nq_vmem_len, 128);
@@ -159,7 +159,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
nq_ring_start = cp_ring_start + cp_ring_len;
nq_ring_start = RTE_ALIGN(nq_ring_start, 4096);
- int nq_ring_len = BNXT_CHIP_THOR(bp) ? cp_ring_len : 0;
+ int nq_ring_len = nq_ring_info ? cp_ring_len : 0;
int tx_ring_start = nq_ring_start + nq_ring_len;
tx_ring_start = RTE_ALIGN(tx_ring_start, 4096);
@@ -403,12 +403,12 @@ static void bnxt_set_db(struct bnxt *bp,
}
static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
- struct bnxt_cp_ring_info *cpr,
- struct bnxt_cp_ring_info *nqr)
+ struct bnxt_cp_ring_info *cpr)
{
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
int cp_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
+ struct bnxt_cp_ring_info *nqr = bp->rxtx_nq_ring;
uint8_t ring_type;
int rc = 0;
@@ -436,31 +436,85 @@ static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
return 0;
}
-static int bnxt_alloc_nq_ring(struct bnxt *bp, int queue_index,
- struct bnxt_cp_ring_info *nqr)
+int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp)
{
- struct bnxt_ring *nq_ring = nqr->cp_ring_struct;
- int nq_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
+ struct bnxt_cp_ring_info *nqr;
+ struct bnxt_ring *ring;
+ int ring_index = BNXT_NUM_ASYNC_CPR(bp);
+ unsigned int socket_id;
uint8_t ring_type;
int rc = 0;
- if (!BNXT_HAS_NQ(bp))
- return -EINVAL;
+ if (!BNXT_HAS_NQ(bp) || bp->rxtx_nq_ring)
+ return 0;
+
+ socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
+
+ nqr = rte_zmalloc_socket("nqr",
+ sizeof(struct bnxt_cp_ring_info),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (nqr == NULL)
+ return -ENOMEM;
+
+ ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
+ sizeof(struct bnxt_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (ring == NULL) {
+ rte_free(nqr);
+ return -ENOMEM;
+ }
+
+ ring->bd = (void *)nqr->cp_desc_ring;
+ ring->bd_dma = nqr->cp_desc_mapping;
+ ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
+ ring->ring_mask = ring->ring_size - 1;
+ ring->vmem_size = 0;
+ ring->vmem = NULL;
+
+ nqr->cp_ring_struct = ring;
+ rc = bnxt_alloc_rings(bp, 0, NULL, NULL, nqr, NULL, "l2_nqr");
+ if (rc) {
+ rte_free(ring);
+ rte_free(nqr);
+ return -ENOMEM;
+ }
ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
- rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, nq_ring_index,
+ rc = bnxt_hwrm_ring_alloc(bp, ring, ring_type, ring_index,
HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, 0);
- if (rc)
+ if (rc) {
+ rte_free(ring);
+ rte_free(nqr);
return rc;
+ }
- bnxt_set_db(bp, &nqr->cp_db, ring_type, nq_ring_index,
- nq_ring->fw_ring_id);
+ bnxt_set_db(bp, &nqr->cp_db, ring_type, ring_index,
+ ring->fw_ring_id);
bnxt_db_nq(nqr);
+ bp->rxtx_nq_ring = nqr;
+
return 0;
}
+/* Free RX/TX NQ ring. */
+void bnxt_free_rxtx_nq_ring(struct bnxt *bp)
+{
+ struct bnxt_cp_ring_info *nqr = bp->rxtx_nq_ring;
+
+ if (!nqr)
+ return;
+
+ bnxt_free_nq_ring(bp, nqr);
+
+ bnxt_free_ring(nqr->cp_ring_struct);
+ rte_free(nqr->cp_ring_struct);
+ nqr->cp_ring_struct = NULL;
+ rte_free(nqr);
+ bp->rxtx_nq_ring = NULL;
+}
+
static int bnxt_alloc_rx_ring(struct bnxt *bp, int queue_index)
{
struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
@@ -529,17 +583,10 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
- struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
int rc;
- if (BNXT_HAS_NQ(bp)) {
- rc = bnxt_alloc_nq_ring(bp, queue_index, nqr);
- if (rc)
- goto err_out;
- }
-
- rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr);
+ rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr);
if (rc)
goto err_out;
@@ -644,16 +691,10 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
- struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
- if (BNXT_HAS_NQ(bp)) {
- if (bnxt_alloc_nq_ring(bp, i, nqr))
- goto err_out;
- }
-
- if (bnxt_alloc_cmpl_ring(bp, i, cpr, nqr))
+ if (bnxt_alloc_cmpl_ring(bp, i, cpr))
goto err_out;
if (BNXT_HAS_RING_GRPS(bp)) {
@@ -697,18 +738,12 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
struct bnxt_tx_queue *txq = bp->tx_queues[i];
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
- struct bnxt_cp_ring_info *nqr = txq->nq_ring;
struct bnxt_tx_ring_info *txr = txq->tx_ring;
struct bnxt_ring *ring = txr->tx_ring_struct;
unsigned int idx = i + bp->rx_cp_nr_rings;
uint16_t tx_cosq_id = 0;
- if (BNXT_HAS_NQ(bp)) {
- if (bnxt_alloc_nq_ring(bp, idx, nqr))
- goto err_out;
- }
-
- if (bnxt_alloc_cmpl_ring(bp, idx, cpr, nqr))
+ if (bnxt_alloc_cmpl_ring(bp, idx, cpr))
goto err_out;
if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index a5d5106986..833118391b 100644
--- a/drivers/net/bnxt/bnxt_ring.h
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -78,6 +78,8 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp);
int bnxt_alloc_async_cp_ring(struct bnxt *bp);
void bnxt_free_async_cp_ring(struct bnxt *bp);
int bnxt_alloc_async_ring_struct(struct bnxt *bp);
+int bnxt_alloc_rxtx_nq_ring(struct bnxt *bp);
+void bnxt_free_rxtx_nq_ring(struct bnxt *bp);
static inline void bnxt_db_write(struct bnxt_db_info *db, uint32_t idx)
{
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 5d291cbafd..9439fcd1fb 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -341,8 +341,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
eth_dev->data->rx_queues[queue_idx] = rxq;
/* Allocate RX ring hardware descriptors */
- if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
- rxq->nq_ring, "rxr")) {
+ if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, NULL,
+ "rxr")) {
PMD_DRV_LOG(ERR,
"ring_dma_zone_reserve for rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 3693d89a60..4f5182d9e9 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -39,7 +39,6 @@ struct bnxt_rx_queue {
uint32_t rx_buf_size;
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
- struct bnxt_cp_ring_info *nq_ring;
rte_atomic64_t rx_mbuf_alloc_fail;
const struct rte_memzone *mz;
};
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 1a6fb7944b..bda4f4c1b9 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -742,7 +742,6 @@ void bnxt_free_rx_rings(struct bnxt *bp)
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
{
struct bnxt_cp_ring_info *cpr;
- struct bnxt_cp_ring_info *nqr;
struct bnxt_rx_ring_info *rxr;
struct bnxt_ring *ring;
@@ -789,32 +788,6 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
ring->vmem_size = 0;
ring->vmem = NULL;
- if (BNXT_HAS_NQ(rxq->bp)) {
- nqr = rte_zmalloc_socket("bnxt_rx_ring_cq",
- sizeof(struct bnxt_cp_ring_info),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (nqr == NULL)
- return -ENOMEM;
-
- rxq->nq_ring = nqr;
-
- ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
- sizeof(struct bnxt_ring),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (ring == NULL)
- return -ENOMEM;
-
- nqr->cp_ring_struct = ring;
- ring->ring_size =
- rte_align32pow2(rxr->rx_ring_struct->ring_size *
- (2 + AGG_RING_SIZE_FACTOR));
- ring->ring_mask = ring->ring_size - 1;
- ring->bd = (void *)nqr->cp_desc_ring;
- ring->bd_dma = nqr->cp_desc_mapping;
- ring->vmem_size = 0;
- ring->vmem = NULL;
- }
-
/* Allocate Aggregator rings */
ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
sizeof(struct bnxt_ring),
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index ea20d737fe..5ad4ee155e 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -141,8 +141,8 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
txq->port_id = eth_dev->data->port_id;
/* Allocate TX ring hardware descriptors */
- if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring,
- txq->nq_ring, "txr")) {
+ if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring, NULL,
+ "txr")) {
PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
bnxt_tx_queue_release_op(txq);
rc = -ENOMEM;
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index 7a442516d2..37a3f9539f 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -33,7 +33,6 @@ struct bnxt_tx_queue {
unsigned int cp_nr_rings;
struct bnxt_cp_ring_info *cp_ring;
- struct bnxt_cp_ring_info *nq_ring;
const struct rte_memzone *mz;
struct rte_mbuf **free;
};
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 0ed6581bed..6e2ee86c05 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -57,7 +57,6 @@ int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
{
struct bnxt_cp_ring_info *cpr;
- struct bnxt_cp_ring_info *nqr;
struct bnxt_tx_ring_info *txr;
struct bnxt_ring *ring;
@@ -101,30 +100,6 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
ring->vmem_size = 0;
ring->vmem = NULL;
- if (BNXT_HAS_NQ(txq->bp)) {
- nqr = rte_zmalloc_socket("bnxt_tx_ring_nq",
- sizeof(struct bnxt_cp_ring_info),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (nqr == NULL)
- return -ENOMEM;
-
- txq->nq_ring = nqr;
-
- ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
- sizeof(struct bnxt_ring),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (ring == NULL)
- return -ENOMEM;
-
- nqr->cp_ring_struct = ring;
- ring->ring_size = txr->tx_ring_struct->ring_size;
- ring->ring_mask = ring->ring_size - 1;
- ring->bd = (void *)nqr->cp_desc_ring;
- ring->bd_dma = nqr->cp_desc_mapping;
- ring->vmem_size = 0;
- ring->vmem = NULL;
- }
-
return 0;
}
--
2.20.1 (Apple Git-117)