From: Junlong Wang <wang.junlong1@zte.com.cn>
To: stephen@networkplumber.org
Cc: dev@dpdk.org, Junlong Wang <wang.junlong1@zte.com.cn>
Subject: [PATCH v5 07/15] net/zxdh: provided dev simple tx implementations
Date: Mon, 23 Dec 2024 19:02:41 +0800 [thread overview]
Message-ID: <20241223110249.1483277-8-wang.junlong1@zte.com.cn> (raw)
In-Reply-To: <20241223110249.1483277-1-wang.junlong1@zte.com.cn>
[-- Attachment #1.1.1: Type: text/plain, Size: 18454 bytes --]
Provide simple Tx datapath implementations for the device.
Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
drivers/net/zxdh/meson.build | 1 +
drivers/net/zxdh/zxdh_ethdev.c | 22 ++
drivers/net/zxdh/zxdh_queue.h | 26 ++-
drivers/net/zxdh/zxdh_rxtx.c | 396 +++++++++++++++++++++++++++++++++
drivers/net/zxdh/zxdh_rxtx.h | 4 +
5 files changed, 448 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/zxdh/zxdh_rxtx.c
diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build
index 5b3af87c5b..20b2cf484a 100644
--- a/drivers/net/zxdh/meson.build
+++ b/drivers/net/zxdh/meson.build
@@ -21,4 +21,5 @@ sources = files(
'zxdh_queue.c',
'zxdh_np.c',
'zxdh_tables.c',
+ 'zxdh_rxtx.c',
)
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 6e603b967e..aef77e86a0 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -15,6 +15,7 @@
#include "zxdh_queue.h"
#include "zxdh_np.h"
#include "zxdh_tables.h"
+#include "zxdh_rxtx.h"
struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];
struct zxdh_shared_data *zxdh_shared_data;
@@ -956,6 +957,25 @@ zxdh_dev_close(struct rte_eth_dev *dev)
return ret;
}
+static int32_t
+zxdh_set_rxtx_funcs(struct rte_eth_dev *eth_dev)
+{
+ struct zxdh_hw *hw = eth_dev->data->dev_private;
+
+ if (!zxdh_pci_packed_queue(hw)) {
+ PMD_DRV_LOG(ERR, " port %u not support packed queue", eth_dev->data->port_id);
+ return -1;
+ }
+ if (!zxdh_pci_with_feature(hw, ZXDH_NET_F_MRG_RXBUF)) {
+ PMD_DRV_LOG(ERR, " port %u not support rx mergeable", eth_dev->data->port_id);
+ return -1;
+ }
+ eth_dev->tx_pkt_prepare = zxdh_xmit_pkts_prepare;
+ eth_dev->tx_pkt_burst = &zxdh_xmit_pkts_packed;
+
+ return 0;
+}
+
static int
zxdh_dev_start(struct rte_eth_dev *dev)
{
@@ -971,6 +991,8 @@ zxdh_dev_start(struct rte_eth_dev *dev)
if (ret < 0)
return ret;
}
+
+ zxdh_set_rxtx_funcs(dev);
ret = zxdh_intr_enable(dev);
if (ret) {
PMD_DRV_LOG(ERR, "interrupt enable failed");
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index 6513aec3f0..9343df81ac 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -21,8 +21,15 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };
#define ZXDH_TQ_QUEUE_IDX 1
#define ZXDH_MAX_TX_INDIRECT 8
+/* This marks a buffer as continuing via the next field. */
+#define ZXDH_VRING_DESC_F_NEXT 1
+
/* This marks a buffer as write-only (otherwise read-only). */
-#define ZXDH_VRING_DESC_F_WRITE 2
+#define ZXDH_VRING_DESC_F_WRITE 2
+
+/* This means the buffer contains a list of buffer descriptors. */
+#define ZXDH_VRING_DESC_F_INDIRECT 4
+
/* This flag means the descriptor was made available by the driver */
#define ZXDH_VRING_PACKED_DESC_F_AVAIL (1 << (7))
#define ZXDH_VRING_PACKED_DESC_F_USED (1 << (15))
@@ -35,11 +42,17 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };
#define ZXDH_RING_EVENT_FLAGS_DISABLE 0x1
#define ZXDH_RING_EVENT_FLAGS_DESC 0x2
+#define ZXDH_RING_F_INDIRECT_DESC 28
+
#define ZXDH_VQ_RING_DESC_CHAIN_END 32768
#define ZXDH_QUEUE_DEPTH 1024
#define ZXDH_RQ_QUEUE_IDX 0
#define ZXDH_TQ_QUEUE_IDX 1
+#define ZXDH_TYPE_HDR_SIZE sizeof(struct zxdh_type_hdr)
+#define ZXDH_PI_HDR_SIZE sizeof(struct zxdh_pi_hdr)
+#define ZXDH_DL_NET_HDR_SIZE sizeof(struct zxdh_net_hdr_dl)
+#define ZXDH_UL_NET_HDR_SIZE sizeof(struct zxdh_net_hdr_ul)
/*
* ring descriptors: 16 bytes.
@@ -355,6 +368,17 @@ static inline void zxdh_queue_notify(struct zxdh_virtqueue *vq)
ZXDH_VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
}
+static inline int32_t
+zxdh_queue_kick_prepare_packed(struct zxdh_virtqueue *vq)
+{
+ uint16_t flags = 0;
+
+ zxdh_mb(vq->hw->weak_barriers);
+ flags = vq->vq_packed.ring.device->desc_event_flags;
+
+ return (flags != ZXDH_RING_EVENT_FLAGS_DISABLE);
+}
+
struct rte_mbuf *zxdh_queue_detach_unused(struct zxdh_virtqueue *vq);
int32_t zxdh_free_queues(struct rte_eth_dev *dev);
int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx);
diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c
new file mode 100644
index 0000000000..10034a0e98
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_rxtx.c
@@ -0,0 +1,396 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <stdint.h>
+#include <stdalign.h>
+
+#include <rte_net.h>
+
+#include "zxdh_logs.h"
+#include "zxdh_pci.h"
+#include "zxdh_queue.h"
+
+#define ZXDH_PKT_FORM_CPU 0x20 /* 1-cpu 0-np */
+#define ZXDH_NO_IP_FRAGMENT 0x2000 /* ip fragment flag */
+#define ZXDH_NO_IPID_UPDATE 0x4000 /* ipid update flag */
+
+#define ZXDH_PI_L3TYPE_IP 0x00
+#define ZXDH_PI_L3TYPE_IPV6 0x40
+#define ZXDH_PI_L3TYPE_NOIP 0x80
+#define ZXDH_PI_L3TYPE_RSV 0xC0
+#define ZXDH_PI_L3TYPE_MASK 0xC0
+
+#define ZXDH_PCODE_MASK 0x1F
+#define ZXDH_PCODE_IP_PKT_TYPE 0x01
+#define ZXDH_PCODE_TCP_PKT_TYPE 0x02
+#define ZXDH_PCODE_UDP_PKT_TYPE 0x03
+#define ZXDH_PCODE_NO_IP_PKT_TYPE 0x09
+#define ZXDH_PCODE_NO_REASSMBLE_TCP_PKT_TYPE 0x0C
+
+#define ZXDH_TX_MAX_SEGS 31
+#define ZXDH_RX_MAX_SEGS 31
+
+static void
+zxdh_xmit_cleanup_inorder_packed(struct zxdh_virtqueue *vq, int32_t num)
+{
+ uint16_t used_idx = 0;
+ uint16_t id = 0;
+ uint16_t curr_id = 0;
+ uint16_t free_cnt = 0;
+ uint16_t size = vq->vq_nentries;
+ struct zxdh_vring_packed_desc *desc = vq->vq_packed.ring.desc;
+ struct zxdh_vq_desc_extra *dxp = NULL;
+
+ used_idx = vq->vq_used_cons_idx;
+ /* desc_is_used has a load-acquire or rte_io_rmb inside
+ * and waits for a used descriptor in the virtqueue.
+ */
+ while (num > 0 && zxdh_desc_used(&desc[used_idx], vq)) {
+ id = desc[used_idx].id;
+ do {
+ curr_id = used_idx;
+ dxp = &vq->vq_descx[used_idx];
+ used_idx += dxp->ndescs;
+ free_cnt += dxp->ndescs;
+ num -= dxp->ndescs;
+ if (used_idx >= size) {
+ used_idx -= size;
+ vq->vq_packed.used_wrap_counter ^= 1;
+ }
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ } while (curr_id != id);
+ }
+ vq->vq_used_cons_idx = used_idx;
+ vq->vq_free_cnt += free_cnt;
+}
+
+static void
+zxdh_ring_free_id_packed(struct zxdh_virtqueue *vq, uint16_t id)
+{
+ struct zxdh_vq_desc_extra *dxp = NULL;
+
+ dxp = &vq->vq_descx[id];
+ vq->vq_free_cnt += dxp->ndescs;
+
+ if (vq->vq_desc_tail_idx == ZXDH_VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_head_idx = id;
+ else
+ vq->vq_descx[vq->vq_desc_tail_idx].next = id;
+
+ vq->vq_desc_tail_idx = id;
+ dxp->next = ZXDH_VQ_RING_DESC_CHAIN_END;
+}
+
+static void
+zxdh_xmit_cleanup_normal_packed(struct zxdh_virtqueue *vq, int32_t num)
+{
+ uint16_t used_idx = 0;
+ uint16_t id = 0;
+ uint16_t size = vq->vq_nentries;
+ struct zxdh_vring_packed_desc *desc = vq->vq_packed.ring.desc;
+ struct zxdh_vq_desc_extra *dxp = NULL;
+
+ used_idx = vq->vq_used_cons_idx;
+ /* desc_is_used has a load-acquire or rte_io_rmb inside
+ * and waits for a used descriptor in the virtqueue.
+ */
+ while (num-- && zxdh_desc_used(&desc[used_idx], vq)) {
+ id = desc[used_idx].id;
+ dxp = &vq->vq_descx[id];
+ vq->vq_used_cons_idx += dxp->ndescs;
+ if (vq->vq_used_cons_idx >= size) {
+ vq->vq_used_cons_idx -= size;
+ vq->vq_packed.used_wrap_counter ^= 1;
+ }
+ zxdh_ring_free_id_packed(vq, id);
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ used_idx = vq->vq_used_cons_idx;
+ }
+}
+
+static void
+zxdh_xmit_cleanup_packed(struct zxdh_virtqueue *vq, int32_t num, int32_t in_order)
+{
+ if (in_order)
+ zxdh_xmit_cleanup_inorder_packed(vq, num);
+ else
+ zxdh_xmit_cleanup_normal_packed(vq, num);
+}
+
+static uint8_t
+zxdh_xmit_get_ptype(struct rte_mbuf *m)
+{
+ uint8_t pcode = ZXDH_PCODE_NO_IP_PKT_TYPE;
+ uint8_t l3_ptype = ZXDH_PI_L3TYPE_NOIP;
+
+ if ((m->packet_type & RTE_PTYPE_INNER_L3_MASK) == RTE_PTYPE_INNER_L3_IPV4 ||
+ ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) &&
+ (m->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4)) {
+ l3_ptype = ZXDH_PI_L3TYPE_IP;
+ pcode = ZXDH_PCODE_IP_PKT_TYPE;
+ } else if ((m->packet_type & RTE_PTYPE_INNER_L3_MASK) == RTE_PTYPE_INNER_L3_IPV6 ||
+ ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) &&
+ (m->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV6)) {
+ l3_ptype = ZXDH_PI_L3TYPE_IPV6;
+ pcode = ZXDH_PCODE_IP_PKT_TYPE;
+ } else {
+ goto end;
+ }
+
+ if ((m->packet_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_TCP ||
+ ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) &&
+ (m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP))
+ pcode = ZXDH_PCODE_TCP_PKT_TYPE;
+ else if ((m->packet_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP ||
+ ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) &&
+ (m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP))
+ pcode = ZXDH_PCODE_UDP_PKT_TYPE;
+
+end:
+ return l3_ptype | ZXDH_PKT_FORM_CPU | pcode;
+}
+
+static void zxdh_xmit_fill_net_hdr(struct rte_mbuf *cookie,
+ struct zxdh_net_hdr_dl *hdr)
+{
+ uint16_t pkt_flag_lw16 = ZXDH_NO_IPID_UPDATE;
+ uint16_t l3_offset;
+ uint32_t ol_flag = 0;
+
+ hdr->pi_hdr.pkt_flag_lw16 = rte_be_to_cpu_16(pkt_flag_lw16);
+
+ hdr->pi_hdr.pkt_type = zxdh_xmit_get_ptype(cookie);
+ l3_offset = ZXDH_DL_NET_HDR_SIZE + cookie->outer_l2_len +
+ cookie->outer_l3_len + cookie->l2_len;
+ hdr->pi_hdr.l3_offset = rte_be_to_cpu_16(l3_offset);
+ hdr->pi_hdr.l4_offset = rte_be_to_cpu_16(l3_offset + cookie->l3_len);
+
+ hdr->pd_hdr.ol_flag = rte_be_to_cpu_32(ol_flag);
+}
+
+static inline void zxdh_enqueue_xmit_packed_fast(struct zxdh_virtnet_tx *txvq,
+ struct rte_mbuf *cookie, int32_t in_order)
+{
+ struct zxdh_virtqueue *vq = txvq->vq;
+ uint16_t id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
+ struct zxdh_vq_desc_extra *dxp = &vq->vq_descx[id];
+ uint16_t flags = vq->vq_packed.cached_flags;
+ struct zxdh_net_hdr_dl *hdr = NULL;
+
+ dxp->ndescs = 1;
+ dxp->cookie = cookie;
+ hdr = rte_pktmbuf_mtod_offset(cookie, struct zxdh_net_hdr_dl *, -ZXDH_DL_NET_HDR_SIZE);
+ zxdh_xmit_fill_net_hdr(cookie, hdr);
+
+ uint16_t idx = vq->vq_avail_idx;
+ struct zxdh_vring_packed_desc *dp = &vq->vq_packed.ring.desc[idx];
+
+ dp->addr = rte_pktmbuf_iova(cookie) - ZXDH_DL_NET_HDR_SIZE;
+ dp->len = cookie->data_len + ZXDH_DL_NET_HDR_SIZE;
+ dp->id = id;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
+ }
+ vq->vq_free_cnt--;
+ if (!in_order) {
+ vq->vq_desc_head_idx = dxp->next;
+ if (vq->vq_desc_head_idx == ZXDH_VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = ZXDH_VQ_RING_DESC_CHAIN_END;
+ }
+ zxdh_queue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
+}
+
+static inline void zxdh_enqueue_xmit_packed(struct zxdh_virtnet_tx *txvq,
+ struct rte_mbuf *cookie,
+ uint16_t needed,
+ int32_t use_indirect,
+ int32_t in_order)
+{
+ struct zxdh_tx_region *txr = txvq->zxdh_net_hdr_mz->addr;
+ struct zxdh_virtqueue *vq = txvq->vq;
+ struct zxdh_vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
+ void *hdr = NULL;
+ uint16_t head_idx = vq->vq_avail_idx;
+ uint16_t idx = head_idx;
+ uint16_t prev = head_idx;
+ uint16_t head_flags = cookie->next ? ZXDH_VRING_DESC_F_NEXT : 0;
+ uint16_t seg_num = cookie->nb_segs;
+ uint16_t id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
+ struct zxdh_vring_packed_desc *head_dp = &vq->vq_packed.ring.desc[idx];
+ struct zxdh_vq_desc_extra *dxp = &vq->vq_descx[id];
+
+ dxp->ndescs = needed;
+ dxp->cookie = cookie;
+ head_flags |= vq->vq_packed.cached_flags;
+ /* if offload disabled, it is not zeroed below, do it now */
+
+ if (use_indirect) {
+ /**
+ * setup tx ring slot to point to indirect
+ * descriptor list stored in reserved region.
+ * the first slot in indirect ring is already
+ * preset to point to the header in reserved region
+ **/
+ start_dp[idx].addr =
+ txvq->zxdh_net_hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
+ start_dp[idx].len = (seg_num + 1) * sizeof(struct zxdh_vring_packed_desc);
+ /* Packed descriptor id needs to be restored when inorder. */
+ if (in_order)
+ start_dp[idx].id = idx;
+
+ /* reset flags for indirect desc */
+ head_flags = ZXDH_VRING_DESC_F_INDIRECT;
+ head_flags |= vq->vq_packed.cached_flags;
+ hdr = (void *)&txr[idx].tx_hdr;
+ /* loop below will fill in rest of the indirect elements */
+ start_dp = txr[idx].tx_packed_indir;
+ start_dp->len = ZXDH_DL_NET_HDR_SIZE; /* update actual net or type hdr size */
+ idx = 1;
+ } else {
+ /* setup first tx ring slot to point to header stored in reserved region. */
+ start_dp[idx].addr = txvq->zxdh_net_hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+ start_dp[idx].len = ZXDH_DL_NET_HDR_SIZE;
+ head_flags |= ZXDH_VRING_DESC_F_NEXT;
+ hdr = (void *)&txr[idx].tx_hdr;
+ idx++;
+ if (idx >= vq->vq_nentries) {
+ idx -= vq->vq_nentries;
+ vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
+ }
+ }
+ zxdh_xmit_fill_net_hdr(cookie, (struct zxdh_net_hdr_dl *)hdr);
+
+ do {
+ start_dp[idx].addr = rte_pktmbuf_iova(cookie);
+ start_dp[idx].len = cookie->data_len;
+ if (likely(idx != head_idx)) {
+ uint16_t flags = cookie->next ? ZXDH_VRING_DESC_F_NEXT : 0;
+ flags |= vq->vq_packed.cached_flags;
+ start_dp[idx].flags = flags;
+ }
+ prev = idx;
+ idx++;
+ if (idx >= vq->vq_nentries) {
+ idx -= vq->vq_nentries;
+ vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
+ }
+ } while ((cookie = cookie->next) != NULL);
+ start_dp[prev].id = id;
+ if (use_indirect) {
+ idx = head_idx;
+ if (++idx >= vq->vq_nentries) {
+ idx -= vq->vq_nentries;
+ vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
+ }
+ }
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+ vq->vq_avail_idx = idx;
+ if (!in_order) {
+ vq->vq_desc_head_idx = dxp->next;
+ if (vq->vq_desc_head_idx == ZXDH_VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = ZXDH_VQ_RING_DESC_CHAIN_END;
+ }
+ zxdh_queue_store_flags_packed(head_dp, head_flags, vq->hw->weak_barriers);
+}
+
+uint16_t
+zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct zxdh_virtnet_tx *txvq = tx_queue;
+ struct zxdh_virtqueue *vq = txvq->vq;
+ struct zxdh_hw *hw = vq->hw;
+ uint16_t nb_tx = 0;
+
+ bool in_order = zxdh_pci_with_feature(hw, ZXDH_F_IN_ORDER);
+
+ if (nb_pkts > vq->vq_free_cnt)
+ zxdh_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt, in_order);
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx];
+ int32_t can_push = 0;
+ int32_t use_indirect = 0;
+ int32_t slots = 0;
+ int32_t need = 0;
+
+ /* optimize ring usage */
+ if ((zxdh_pci_with_feature(hw, ZXDH_F_ANY_LAYOUT) ||
+ zxdh_pci_with_feature(hw, ZXDH_F_VERSION_1)) &&
+ rte_mbuf_refcnt_read(txm) == 1 &&
+ RTE_MBUF_DIRECT(txm) &&
+ txm->nb_segs == 1 &&
+ rte_pktmbuf_headroom(txm) >= ZXDH_DL_NET_HDR_SIZE &&
+ rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
+ alignof(struct zxdh_net_hdr_dl))) {
+ can_push = 1;
+ } else if (zxdh_pci_with_feature(hw, ZXDH_RING_F_INDIRECT_DESC) &&
+ txm->nb_segs < ZXDH_MAX_TX_INDIRECT) {
+ use_indirect = 1;
+ }
+ /**
+ * How many main ring entries are needed for this Tx?
+ * indirect => 1
+ * any_layout => number of segments
+ * default => number of segments + 1
+ **/
+ slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
+ need = slots - vq->vq_free_cnt;
+ /* A positive value indicates that vring descriptors must be freed first */
+ if (unlikely(need > 0)) {
+ zxdh_xmit_cleanup_packed(vq, need, in_order);
+ need = slots - vq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR, "port[ep:%d, pf:%d, vf:%d, vfid:%d, pcieid:%d], queue:%d[pch:%d]. No free desc to xmit",
+ hw->vport.epid, hw->vport.pfid, hw->vport.vfid,
+ hw->vfid, hw->pcie_id, txvq->queue_id,
+ hw->channel_context[txvq->queue_id].ph_chno);
+ break;
+ }
+ }
+ /* Enqueue Packet buffers */
+ if (can_push)
+ zxdh_enqueue_xmit_packed_fast(txvq, txm, in_order);
+ else
+ zxdh_enqueue_xmit_packed(txvq, txm, slots, use_indirect, in_order);
+ }
+ if (likely(nb_tx)) {
+ if (unlikely(zxdh_queue_kick_prepare_packed(vq))) {
+ zxdh_queue_notify(vq);
+ PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+ }
+ }
+ return nb_tx;
+}
+
+uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx;
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *m = tx_pkts[nb_tx];
+ int32_t error;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ error = rte_validate_tx_offload(m);
+ if (unlikely(error)) {
+ rte_errno = -error;
+ break;
+ }
+#endif
+
+ error = rte_net_intel_cksum_prepare(m);
+ if (unlikely(error)) {
+ rte_errno = -error;
+ break;
+ }
+ }
+ return nb_tx;
+}
diff --git a/drivers/net/zxdh/zxdh_rxtx.h b/drivers/net/zxdh/zxdh_rxtx.h
index 8c7f734805..0a02d319b2 100644
--- a/drivers/net/zxdh/zxdh_rxtx.h
+++ b/drivers/net/zxdh/zxdh_rxtx.h
@@ -42,4 +42,8 @@ struct __rte_cache_aligned zxdh_virtnet_tx {
const struct rte_memzone *mz; /* mem zone to populate TX ring. */
};
+uint16_t zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
#endif /* ZXDH_RXTX_H */
--
2.27.0
[-- Attachment #1.1.2: Type: text/html , Size: 45252 bytes --]
next prev parent reply other threads:[~2024-12-23 11:12 UTC|newest]
Thread overview: 207+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-09-10 12:00 [PATCH v4] net/zxdh: Provided zxdh basic init Junlong Wang
2024-09-24 1:35 ` [v4] " Junlong Wang
2024-09-25 22:39 ` [PATCH v4] " Ferruh Yigit
2024-09-26 6:49 ` [v4] " Junlong Wang
2024-10-07 21:43 ` [PATCH v4] " Stephen Hemminger
2024-10-15 5:43 ` [PATCH v5 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-15 5:43 ` [PATCH v5 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-15 5:44 ` [PATCH v5 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-15 5:44 ` [PATCH v5 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-15 5:44 ` [PATCH v5 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-15 5:44 ` [PATCH v5 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-15 5:44 ` [PATCH v5 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-15 5:44 ` [PATCH v5 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-15 5:44 ` [PATCH v5 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-15 5:44 ` [PATCH v5 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-15 15:37 ` Stephen Hemminger
2024-10-15 15:57 ` Stephen Hemminger
2024-10-16 8:16 ` [PATCH v6 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-16 8:16 ` [PATCH v6 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-16 8:18 ` [PATCH v6 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-16 8:18 ` [PATCH v6 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-16 8:18 ` [PATCH v6 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-16 8:18 ` [PATCH v6 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-21 8:50 ` Thomas Monjalon
2024-10-21 10:56 ` Junlong Wang
2024-10-16 8:18 ` [PATCH v6 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-21 8:52 ` Thomas Monjalon
2024-10-16 8:18 ` [PATCH v6 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-16 8:18 ` [PATCH v6 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-21 8:54 ` Thomas Monjalon
2024-10-16 8:18 ` [PATCH v6 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-18 5:18 ` [v6,9/9] " Junlong Wang
2024-10-18 6:48 ` David Marchand
2024-10-19 11:17 ` Junlong Wang
2024-10-21 9:03 ` [PATCH v6 1/9] net/zxdh: add zxdh ethdev pmd driver Thomas Monjalon
2024-10-22 12:20 ` [PATCH v7 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-22 12:20 ` [PATCH v7 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-30 9:01 ` [PATCH v8 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-30 9:01 ` [PATCH v8 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-11-01 6:21 ` [PATCH v9 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-11-01 6:21 ` [PATCH v9 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-11-02 0:57 ` Ferruh Yigit
2024-11-04 11:58 ` [PATCH v10 00/10] net/zxdh: introduce net zxdh driver Junlong Wang
2024-11-04 11:58 ` [PATCH v10 01/10] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-11-07 10:32 ` [PATCH v10 00/10] net/zxdh: introduce net zxdh driver Junlong Wang
2024-11-12 0:42 ` Thomas Monjalon
2024-12-06 5:57 ` [PATCH v1 00/15] net/zxdh: updated " Junlong Wang
2024-12-06 5:57 ` [PATCH v1 01/15] net/zxdh: zxdh np init implementation Junlong Wang
2024-12-10 5:53 ` [PATCH v2 00/15] net/zxdh: updated net zxdh driver Junlong Wang
2024-12-10 5:53 ` [PATCH v2 01/15] net/zxdh: zxdh np init implementation Junlong Wang
2024-12-11 16:10 ` Stephen Hemminger
2024-12-12 2:06 ` Junlong Wang
2024-12-12 3:35 ` Junlong Wang
2024-12-17 11:41 ` [PATCH v3 00/15] net/zxdh: updated net zxdh driver Junlong Wang
2024-12-17 11:41 ` [PATCH v3 01/15] net/zxdh: zxdh np init implementation Junlong Wang
2024-12-17 11:41 ` [PATCH v3 02/15] net/zxdh: zxdh np uninit implementation Junlong Wang
2024-12-17 11:41 ` [PATCH v3 03/15] net/zxdh: port tables init implementations Junlong Wang
2024-12-17 11:41 ` [PATCH v3 04/15] net/zxdh: port tables unint implementations Junlong Wang
2024-12-17 11:41 ` [PATCH v3 05/15] net/zxdh: rx/tx queue setup and intr enable Junlong Wang
2024-12-17 11:41 ` [PATCH v3 06/15] net/zxdh: dev start/stop ops implementations Junlong Wang
2024-12-17 11:41 ` [PATCH v3 07/15] net/zxdh: provided dev simple tx implementations Junlong Wang
2024-12-17 11:41 ` [PATCH v3 08/15] net/zxdh: provided dev simple rx implementations Junlong Wang
2024-12-17 11:41 ` [PATCH v3 09/15] net/zxdh: link info update, set link up/down Junlong Wang
2024-12-17 11:41 ` [PATCH v3 10/15] net/zxdh: mac set/add/remove ops implementations Junlong Wang
2024-12-17 11:41 ` [PATCH v3 11/15] net/zxdh: promisc/allmulti " Junlong Wang
2024-12-17 11:41 ` [PATCH v3 12/15] net/zxdh: vlan filter/ offload " Junlong Wang
2024-12-17 11:41 ` [PATCH v3 13/15] net/zxdh: rss hash config/update, reta update/get Junlong Wang
2024-12-17 11:41 ` [PATCH v3 14/15] net/zxdh: basic stats ops implementations Junlong Wang
2024-12-17 11:41 ` [PATCH v3 15/15] net/zxdh: mtu update " Junlong Wang
2024-12-18 9:25 ` [PATCH v4 00/15] net/zxdh: updated net zxdh driver Junlong Wang
2024-12-18 9:25 ` [PATCH v4 01/15] net/zxdh: zxdh np init implementation Junlong Wang
2024-12-18 9:25 ` [PATCH v4 02/15] net/zxdh: zxdh np uninit implementation Junlong Wang
2024-12-18 9:25 ` [PATCH v4 03/15] net/zxdh: port tables init implementations Junlong Wang
2024-12-18 9:25 ` [PATCH v4 04/15] net/zxdh: port tables unint implementations Junlong Wang
2024-12-18 9:25 ` [PATCH v4 05/15] net/zxdh: rx/tx queue setup and intr enable Junlong Wang
2024-12-18 9:25 ` [PATCH v4 06/15] net/zxdh: dev start/stop ops implementations Junlong Wang
2024-12-21 0:51 ` Stephen Hemminger
2024-12-18 9:25 ` [PATCH v4 07/15] net/zxdh: provided dev simple tx implementations Junlong Wang
2024-12-18 9:25 ` [PATCH v4 08/15] net/zxdh: provided dev simple rx implementations Junlong Wang
2024-12-18 9:25 ` [PATCH v4 09/15] net/zxdh: link info update, set link up/down Junlong Wang
2024-12-18 9:25 ` [PATCH v4 10/15] net/zxdh: mac set/add/remove ops implementations Junlong Wang
2024-12-18 9:25 ` [PATCH v4 11/15] net/zxdh: promisc/allmulti " Junlong Wang
2024-12-18 9:25 ` [PATCH v4 12/15] net/zxdh: vlan filter/ offload " Junlong Wang
2024-12-18 9:26 ` [PATCH v4 13/15] net/zxdh: rss hash config/update, reta update/get Junlong Wang
2024-12-21 0:44 ` Stephen Hemminger
2024-12-18 9:26 ` [PATCH v4 14/15] net/zxdh: basic stats ops implementations Junlong Wang
2024-12-18 9:26 ` [PATCH v4 15/15] net/zxdh: mtu update " Junlong Wang
2024-12-21 0:33 ` Stephen Hemminger
2024-12-23 11:02 ` [PATCH v5 00/15] net/zxdh: updated net zxdh driver Junlong Wang
2024-12-23 11:02 ` [PATCH v5 01/15] net/zxdh: zxdh np init implementation Junlong Wang
2024-12-23 11:02 ` [PATCH v5 02/15] net/zxdh: zxdh np uninit implementation Junlong Wang
2024-12-23 11:02 ` [PATCH v5 03/15] net/zxdh: port tables init implementations Junlong Wang
2024-12-23 11:02 ` [PATCH v5 04/15] net/zxdh: port tables unint implementations Junlong Wang
2024-12-23 11:02 ` [PATCH v5 05/15] net/zxdh: rx/tx queue setup and intr enable Junlong Wang
2024-12-23 11:02 ` [PATCH v5 06/15] net/zxdh: dev start/stop ops implementations Junlong Wang
2024-12-23 11:02 ` Junlong Wang [this message]
2024-12-23 11:02 ` [PATCH v5 08/15] net/zxdh: provided dev simple rx implementations Junlong Wang
2024-12-23 11:02 ` [PATCH v5 09/15] net/zxdh: link info update, set link up/down Junlong Wang
2024-12-23 11:02 ` [PATCH v5 10/15] net/zxdh: mac set/add/remove ops implementations Junlong Wang
2024-12-23 11:02 ` [PATCH v5 11/15] net/zxdh: promisc/allmulti " Junlong Wang
2024-12-23 11:02 ` [PATCH v5 12/15] net/zxdh: vlan filter/ offload " Junlong Wang
2024-12-23 11:02 ` [PATCH v5 13/15] net/zxdh: rss hash config/update, reta update/get Junlong Wang
2024-12-23 11:02 ` [PATCH v5 14/15] net/zxdh: basic stats ops implementations Junlong Wang
2024-12-23 11:02 ` [PATCH v5 15/15] net/zxdh: mtu update " Junlong Wang
2024-12-10 5:53 ` [PATCH v2 02/15] net/zxdh: zxdh np uninit implementation Junlong Wang
2024-12-13 19:38 ` Stephen Hemminger
2024-12-13 19:41 ` Stephen Hemminger
2024-12-13 19:41 ` Stephen Hemminger
2024-12-10 5:53 ` [PATCH v2 03/15] net/zxdh: port tables init implementations Junlong Wang
2024-12-13 19:42 ` Stephen Hemminger
2024-12-10 5:53 ` [PATCH v2 04/15] net/zxdh: port tables unint implementations Junlong Wang
2024-12-13 19:45 ` Stephen Hemminger
2024-12-13 19:48 ` Stephen Hemminger
2024-12-10 5:53 ` [PATCH v2 05/15] net/zxdh: rx/tx queue setup and intr enable Junlong Wang
2024-12-10 5:53 ` [PATCH v2 06/15] net/zxdh: dev start/stop ops implementations Junlong Wang
2024-12-13 21:05 ` Stephen Hemminger
2024-12-10 5:53 ` [PATCH v2 07/15] net/zxdh: provided dev simple tx implementations Junlong Wang
2024-12-10 5:53 ` [PATCH v2 08/15] net/zxdh: provided dev simple rx implementations Junlong Wang
2024-12-10 5:53 ` [PATCH v2 09/15] net/zxdh: link info update, set link up/down Junlong Wang
2024-12-13 19:57 ` Stephen Hemminger
2024-12-13 20:08 ` Stephen Hemminger
2024-12-10 5:53 ` [PATCH v2 10/15] net/zxdh: mac set/add/remove ops implementations Junlong Wang
2024-12-10 5:53 ` [PATCH v2 11/15] net/zxdh: promisc/allmulti " Junlong Wang
2024-12-10 5:53 ` [PATCH v2 12/15] net/zxdh: vlan filter/ offload " Junlong Wang
2024-12-10 5:53 ` [PATCH v2 13/15] net/zxdh: rss hash config/update, reta update/get Junlong Wang
2024-12-10 5:53 ` [PATCH v2 14/15] net/zxdh: basic stats ops implementations Junlong Wang
2024-12-10 5:53 ` [PATCH v2 15/15] net/zxdh: mtu update " Junlong Wang
2024-12-06 5:57 ` [PATCH v1 02/15] net/zxdh: zxdh np uninit implementation Junlong Wang
2024-12-06 5:57 ` [PATCH v1 03/15] net/zxdh: port tables init implementations Junlong Wang
2024-12-06 5:57 ` [PATCH v1 04/15] net/zxdh: port tables unint implementations Junlong Wang
2024-12-06 5:57 ` [PATCH v1 05/15] net/zxdh: rx/tx queue setup and intr enable Junlong Wang
2024-12-06 5:57 ` [PATCH v1 06/15] net/zxdh: dev start/stop ops implementations Junlong Wang
2024-12-06 5:57 ` [PATCH v1 07/15] net/zxdh: provided dev simple tx implementations Junlong Wang
2024-12-06 5:57 ` [PATCH v1 08/15] net/zxdh: provided dev simple rx implementations Junlong Wang
2024-12-06 5:57 ` [PATCH v1 09/15] net/zxdh: link info update, set link up/down Junlong Wang
2024-12-06 5:57 ` [PATCH v1 10/15] net/zxdh: mac set/add/remove ops implementations Junlong Wang
2024-12-06 5:57 ` [PATCH v1 11/15] net/zxdh: promiscuous/allmulticast " Junlong Wang
2024-12-06 5:57 ` [PATCH v1 12/15] net/zxdh: vlan filter, vlan offload " Junlong Wang
2024-12-06 5:57 ` [PATCH v1 13/15] net/zxdh: rss hash config/update, reta update/get Junlong Wang
2024-12-06 5:57 ` [PATCH v1 14/15] net/zxdh: basic stats ops implementations Junlong Wang
2024-12-06 5:57 ` [PATCH v1 15/15] net/zxdh: mtu update " Junlong Wang
2024-11-04 11:58 ` [PATCH v10 02/10] net/zxdh: add logging implementation Junlong Wang
2024-11-04 11:58 ` [PATCH v10 03/10] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-11-04 11:58 ` [PATCH v10 04/10] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-11-04 11:58 ` [PATCH v10 05/10] net/zxdh: add msg chan enable implementation Junlong Wang
2024-11-04 11:58 ` [PATCH v10 06/10] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-11-04 11:58 ` [PATCH v10 07/10] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-11-04 11:58 ` [PATCH v10 08/10] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-11-04 11:58 ` [PATCH v10 09/10] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-11-04 11:58 ` [PATCH v10 10/10] net/zxdh: add zxdh dev close ops Junlong Wang
2024-11-06 0:40 ` [PATCH v10 00/10] net/zxdh: introduce net zxdh driver Ferruh Yigit
2024-11-07 9:28 ` Ferruh Yigit
2024-11-07 9:58 ` Ferruh Yigit
2024-11-12 2:49 ` Junlong Wang
2024-11-01 6:21 ` [PATCH v9 2/9] net/zxdh: add logging implementation Junlong Wang
2024-11-02 1:02 ` Ferruh Yigit
2024-11-04 2:44 ` [v9,2/9] " Junlong Wang
2024-11-01 6:21 ` [PATCH v9 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-11-02 1:01 ` Ferruh Yigit
2024-11-01 6:21 ` [PATCH v9 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-11-02 1:00 ` Ferruh Yigit
2024-11-04 2:47 ` Junlong Wang
2024-11-01 6:21 ` [PATCH v9 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-11-01 6:21 ` [PATCH v9 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-11-02 1:06 ` Ferruh Yigit
2024-11-04 3:30 ` [v9,6/9] " Junlong Wang
2024-11-01 6:21 ` [PATCH v9 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-11-02 1:07 ` Ferruh Yigit
2024-11-01 6:21 ` [PATCH v9 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-11-01 6:21 ` [PATCH v9 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-11-02 0:56 ` [PATCH v9 0/9] net/zxdh: introduce net zxdh driver Ferruh Yigit
2024-11-04 2:42 ` Junlong Wang
2024-11-04 8:46 ` Ferruh Yigit
2024-11-04 9:52 ` David Marchand
2024-11-04 11:46 ` Junlong Wang
2024-11-04 22:47 ` Thomas Monjalon
2024-11-05 9:39 ` Junlong Wang
2024-11-06 0:38 ` Ferruh Yigit
2024-10-30 9:01 ` [PATCH v8 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-30 9:01 ` [PATCH v8 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-30 14:55 ` David Marchand
2024-10-30 9:01 ` [PATCH v8 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-30 9:01 ` [PATCH v8 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-30 9:01 ` [PATCH v8 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-30 9:01 ` [PATCH v8 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-30 9:01 ` [PATCH v8 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-30 9:01 ` [PATCH v8 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-22 12:20 ` [PATCH v7 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-22 12:20 ` [PATCH v7 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-27 16:47 ` Stephen Hemminger
2024-10-27 16:47 ` Stephen Hemminger
2024-10-22 12:20 ` [PATCH v7 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-22 12:20 ` [PATCH v7 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-26 17:05 ` Thomas Monjalon
2024-10-22 12:20 ` [PATCH v7 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-22 12:20 ` [PATCH v7 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-27 17:07 ` Stephen Hemminger
2024-10-22 12:20 ` [PATCH v7 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-22 12:20 ` [PATCH v7 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-24 11:31 ` [v7,9/9] " Junlong Wang
2024-10-25 9:48 ` Junlong Wang
2024-10-26 2:32 ` Junlong Wang
2024-10-27 16:40 ` [PATCH v7 9/9] " Stephen Hemminger
2024-10-27 17:03 ` Stephen Hemminger
2024-10-27 16:58 ` Stephen Hemminger
2024-12-19 22:38 ` [PATCH v4] net/zxdh: Provided zxdh basic init Stephen Hemminger
2024-12-20 1:47 ` Junlong Wang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20241223110249.1483277-8-wang.junlong1@zte.com.cn \
--to=wang.junlong1@zte.com.cn \
--cc=dev@dpdk.org \
--cc=stephen@networkplumber.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).