DPDK patches and discussions
 help / color / mirror / Atom feed
From: Jerin Jacob <jerin.jacob@caviumnetworks.com>
To: <dev@dpdk.org>
Cc: <thomas.monjalon@6wind.com>, <bruce.richardson@intel.com>,
	<ferruh.yigit@intel.com>,
	Jerin Jacob <jerin.jacob@caviumnetworks.com>,
	Maciej Czekaj <maciej.czekaj@caviumnetworks.com>,
	Kamil Rytarowski <Kamil.Rytarowski@caviumnetworks.com>,
	Zyta Szpak <zyta.szpak@semihalf.com>,
	Slawomir Rosek <slawomir.rosek@semihalf.com>,
	Radoslaw Biernacki <rad@semihalf.com>
Subject: [dpdk-dev]  [PATCH v6 05/27] net/thunderx/base: add hardware API
Date: Fri, 17 Jun 2016 18:59:32 +0530	[thread overview]
Message-ID: <1466170194-28393-6-git-send-email-jerin.jacob@caviumnetworks.com> (raw)
In-Reply-To: <1466170194-28393-1-git-send-email-jerin.jacob@caviumnetworks.com>

Add nicvf hardware-specific APIs for initialization and configuration.

Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Signed-off-by: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
Signed-off-by: Kamil Rytarowski <Kamil.Rytarowski@caviumnetworks.com>
Signed-off-by: Zyta Szpak <zyta.szpak@semihalf.com>
Signed-off-by: Slawomir Rosek <slawomir.rosek@semihalf.com>
Signed-off-by: Radoslaw Biernacki <rad@semihalf.com>
---
 drivers/net/thunderx/base/nicvf_hw.c   | 731 +++++++++++++++++++++++++++++++++
 drivers/net/thunderx/base/nicvf_hw.h   | 176 ++++++++
 drivers/net/thunderx/base/nicvf_plat.h |   1 +
 3 files changed, 908 insertions(+)
 create mode 100644 drivers/net/thunderx/base/nicvf_hw.c
 create mode 100644 drivers/net/thunderx/base/nicvf_hw.h

diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
new file mode 100644
index 0000000..ec24f9c
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -0,0 +1,731 @@
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2016.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <math.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "nicvf_plat.h"
+
+/* Pairs a VF register offset with its printable name; used by the
+ * register dump support below. */
+struct nicvf_reg_info {
+	uint32_t offset;
+	const char *name;
+};
+
+/* Max iterations and per-iteration delay when polling a register field */
+#define NICVF_REG_POLL_ITER_NR   (10)
+#define NICVF_REG_POLL_DELAY_US  (2000)
+/* Build a nicvf_reg_info entry from the register macro itself */
+#define NICVF_REG_INFO(reg) {reg, #reg}
+
+/* Single-instance VF registers reported by nicvf_reg_dump() */
+static const struct nicvf_reg_info nicvf_reg_tbl[] = {
+	NICVF_REG_INFO(NIC_VF_CFG),
+	NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
+	NICVF_REG_INFO(NIC_VF_INT),
+	NICVF_REG_INFO(NIC_VF_INT_W1S),
+	NICVF_REG_INFO(NIC_VF_ENA_W1C),
+	NICVF_REG_INFO(NIC_VF_ENA_W1S),
+	NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
+	NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
+};
+
+/* Multi-instance registers: each entry is one 8-byte slot of the RSS key
+ * array or of the per-VNIC TX/RX statistics block (base + 8*index). */
+static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
+	{NIC_VNIC_RSS_KEY_0_4 + 0,  "NIC_VNIC_RSS_KEY_0"},
+	{NIC_VNIC_RSS_KEY_0_4 + 8,  "NIC_VNIC_RSS_KEY_1"},
+	{NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
+	{NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
+	{NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
+	{NIC_VNIC_TX_STAT_0_4 + 0,  "NIC_VNIC_STAT_TX_OCTS"},
+	{NIC_VNIC_TX_STAT_0_4 + 8,  "NIC_VNIC_STAT_TX_UCAST"},
+	{NIC_VNIC_TX_STAT_0_4 + 16,  "NIC_VNIC_STAT_TX_BCAST"},
+	{NIC_VNIC_TX_STAT_0_4 + 24,  "NIC_VNIC_STAT_TX_MCAST"},
+	{NIC_VNIC_TX_STAT_0_4 + 32,  "NIC_VNIC_STAT_TX_DROP"},
+	{NIC_VNIC_RX_STAT_0_13 + 0,  "NIC_VNIC_STAT_RX_OCTS"},
+	{NIC_VNIC_RX_STAT_0_13 + 8,  "NIC_VNIC_STAT_RX_UCAST"},
+	{NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"},
+	{NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"},
+	{NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"},
+	{NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"},
+	{NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"},
+	{NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"},
+	{NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"},
+	{NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"},
+	{NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"},
+	{NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"},
+	{NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
+	{NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
+};
+
+/* Per-completion-queue registers (one instance per CQ index) */
+static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
+	NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
+};
+
+/* Per-receive-queue registers */
+static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
+	NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
+	NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
+	NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
+};
+
+/* Per-send-queue registers */
+static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
+	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
+};
+
+/* Per-RBDR (receive buffer descriptor ring) registers */
+static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
+	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
+};
+
+/*
+ * Derive the VF capability flags from the detected hardware revision.
+ *
+ * Returns NICVF_OK, or NICVF_ERR_BASE_INIT when the PCI subsystem device
+ * id has not been populated (it is what distinguishes pass1 from pass2
+ * silicon, see nicvf_hw_version()).
+ */
+int
+nicvf_base_init(struct nicvf *nic)
+{
+	nic->hwcap = 0;
+	if (nic->subsystem_device_id == 0)
+		return NICVF_ERR_BASE_INIT;
+
+	/* Tunnel parsing capability is present from pass2 silicon onwards */
+	if (nicvf_hw_version(nic) == NICVF_PASS2)
+		nic->hwcap |= NICVF_CAP_TUNNEL_PARSING;
+
+	return NICVF_OK;
+}
+
+/*
+ * Dump the VF register state.
+ *
+ * When @data is NULL every register is printed to the log; otherwise the
+ * raw 64-bit values are stored into @data in table order (exactly the
+ * number of words reported by nicvf_reg_get_count()).
+ */
+int
+nicvf_reg_dump(struct nicvf *nic,  uint64_t *data)
+{
+	uint32_t idx, qnum;
+	bool to_stdout = (data == NULL);
+
+	/* Single-instance registers */
+	for (idx = 0; idx < NICVF_ARRAY_SIZE(nicvf_reg_tbl); idx++) {
+		if (to_stdout)
+			nicvf_log("%24s  = 0x%" PRIx64 "\n",
+				nicvf_reg_tbl[idx].name,
+				nicvf_reg_read(nic, nicvf_reg_tbl[idx].offset));
+		else
+			*data++ = nicvf_reg_read(nic,
+					nicvf_reg_tbl[idx].offset);
+	}
+
+	/* RSS key and statistics registers */
+	for (idx = 0; idx < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); idx++) {
+		if (to_stdout)
+			nicvf_log("%24s  = 0x%" PRIx64 "\n",
+				nicvf_multi_reg_tbl[idx].name,
+				nicvf_reg_read(nic,
+					nicvf_multi_reg_tbl[idx].offset));
+		else
+			*data++ = nicvf_reg_read(nic,
+					nicvf_multi_reg_tbl[idx].offset);
+	}
+
+	/* Completion queue registers, per CQ */
+	for (qnum = 0; qnum < MAX_CMP_QUEUES_PER_QS; qnum++) {
+		for (idx = 0; idx < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl);
+		     idx++) {
+			if (to_stdout)
+				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
+					nicvf_qset_cq_reg_tbl[idx].name, qnum,
+					nicvf_queue_reg_read(nic,
+					nicvf_qset_cq_reg_tbl[idx].offset,
+					qnum));
+			else
+				*data++ = nicvf_queue_reg_read(nic,
+					nicvf_qset_cq_reg_tbl[idx].offset,
+					qnum);
+		}
+	}
+
+	/* Receive queue registers, per RQ */
+	for (qnum = 0; qnum < MAX_RCV_QUEUES_PER_QS; qnum++) {
+		for (idx = 0; idx < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl);
+		     idx++) {
+			if (to_stdout)
+				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
+					nicvf_qset_rq_reg_tbl[idx].name, qnum,
+					nicvf_queue_reg_read(nic,
+					nicvf_qset_rq_reg_tbl[idx].offset,
+					qnum));
+			else
+				*data++ = nicvf_queue_reg_read(nic,
+					nicvf_qset_rq_reg_tbl[idx].offset,
+					qnum);
+		}
+	}
+
+	/* Send queue registers, per SQ */
+	for (qnum = 0; qnum < MAX_SND_QUEUES_PER_QS; qnum++) {
+		for (idx = 0; idx < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl);
+		     idx++) {
+			if (to_stdout)
+				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
+					nicvf_qset_sq_reg_tbl[idx].name, qnum,
+					nicvf_queue_reg_read(nic,
+					nicvf_qset_sq_reg_tbl[idx].offset,
+					qnum));
+			else
+				*data++ = nicvf_queue_reg_read(nic,
+					nicvf_qset_sq_reg_tbl[idx].offset,
+					qnum);
+		}
+	}
+
+	/* RBDR registers, per ring */
+	for (qnum = 0; qnum < MAX_RCV_BUF_DESC_RINGS_PER_QS; qnum++) {
+		for (idx = 0; idx < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl);
+		     idx++) {
+			if (to_stdout)
+				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
+					nicvf_qset_rbdr_reg_tbl[idx].name, qnum,
+					nicvf_queue_reg_read(nic,
+					nicvf_qset_rbdr_reg_tbl[idx].offset,
+					qnum));
+			else
+				*data++ = nicvf_queue_reg_read(nic,
+					nicvf_qset_rbdr_reg_tbl[idx].offset,
+					qnum);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Number of 64-bit words nicvf_reg_dump() writes when given a buffer.
+ * Must stay in sync with the tables above.
+ */
+int
+nicvf_reg_get_count(void)
+{
+	return NICVF_ARRAY_SIZE(nicvf_reg_tbl) +
+		NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl) +
+		NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
+			MAX_CMP_QUEUES_PER_QS +
+		NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
+			MAX_RCV_QUEUES_PER_QS +
+		NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
+			MAX_SND_QUEUES_PER_QS +
+		NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
+			MAX_RCV_BUF_DESC_RINGS_PER_QS;
+}
+
+/*
+ * Ask the PF (via mailbox) to enable or disable this VF's queue set.
+ * Returns 0 on success, NICVF_ERR_SET_QS on mailbox failure.
+ */
+static int
+nicvf_qset_config_internal(struct nicvf *nic, bool enable)
+{
+	struct pf_qs_cfg qs_cfg = {.value = 0};
+
+	qs_cfg.ena = enable ? 1 : 0;
+	qs_cfg.vnic = nic->vf_id;
+
+	return nicvf_mbox_qset_config(nic, &qs_cfg) ? NICVF_ERR_SET_QS : 0;
+}
+
+/* Requests PF to assign and enable Qset.
+ * Returns 0 on success, NICVF_ERR_SET_QS on mailbox failure. */
+int
+nicvf_qset_config(struct nicvf *nic)
+{
+	/* Enable Qset */
+	return nicvf_qset_config_internal(nic, true);
+}
+
+/* Requests PF to disable the Qset (counterpart of nicvf_qset_config()).
+ * Returns 0 on success, NICVF_ERR_SET_QS on mailbox failure. */
+int
+nicvf_qset_reclaim(struct nicvf *nic)
+{
+	/* Disable Qset */
+	return nicvf_qset_config_internal(nic, false);
+}
+
+/*
+ * qsort() comparator for uint32_t values, ascending.
+ *
+ * Fix: the previous version returned the raw difference of the two
+ * uint32_t operands converted to int. That unsigned subtraction wraps,
+ * so for values differing by 2^31 or more the sign of the result is
+ * inverted and the sort order is wrong. Compare explicitly instead,
+ * which is overflow-free for every input.
+ */
+static int
+cmpfunc(const void *a, const void *b)
+{
+	const uint32_t x = *(const uint32_t *)a;
+	const uint32_t y = *(const uint32_t *)b;
+
+	return (x > y) - (x < y);
+}
+
+/*
+ * Round @val up to the smallest entry of @list that is >= @val.
+ * Sorts @list in place first, so callers must pass a mutable array.
+ * Returns 0 when @val exceeds every entry.
+ */
+static uint32_t
+nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
+{
+	uint32_t idx;
+
+	qsort(list, entries, sizeof(uint32_t), cmpfunc);
+	for (idx = 0; idx < entries; idx++) {
+		if (val <= list[idx])
+			return list[idx];
+	}
+	/* val is larger than every supported size */
+	return 0;
+}
+
+/*
+ * Handle a queue-set error interrupt: dump all registers, log and clear
+ * the error bits of every CQ, SQ and RBDR, mask all interrupts and then
+ * abort() the process.
+ *
+ * Deliberately fatal: a QS error leaves the queues in a state this
+ * driver does not attempt to recover from.
+ */
+static void
+nicvf_handle_qset_err_intr(struct nicvf *nic)
+{
+	uint16_t qidx;
+	uint64_t status;
+
+	nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
+	nicvf_reg_dump(nic, NULL);
+
+	/* Report and clear per-CQ error bits */
+	for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
+		status = nicvf_queue_reg_read(
+				nic, NIC_QSET_CQ_0_7_STATUS, qidx);
+		if (!(status & NICVF_CQ_ERR_MASK))
+			continue;
+
+		if (status & NICVF_CQ_WR_FULL)
+			nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
+		if (status & NICVF_CQ_WR_DISABLE)
+			nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
+		if (status & NICVF_CQ_WR_FAULT)
+			nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
+		/* Writing 0 presumably clears the latched status -- confirm
+		 * against the hardware reference manual */
+		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
+	}
+
+	/* Report and clear per-SQ error bits */
+	for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+		status = nicvf_queue_reg_read(
+				nic, NIC_QSET_SQ_0_7_STATUS, qidx);
+		if (!(status & NICVF_SQ_ERR_MASK))
+			continue;
+
+		if (status & NICVF_SQ_ERR_STOPPED)
+			nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
+		if (status & NICVF_SQ_ERR_SEND)
+			nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
+		if (status & NICVF_SQ_ERR_DPE)
+			nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
+		nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
+	}
+
+	/* Report RBDR FIFO failures and clear the status */
+	for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
+		status = nicvf_queue_reg_read(nic,
+				NIC_QSET_RBDR_0_1_STATUS0, qidx);
+		status &= NICVF_RBDR_FIFO_STATE_MASK;
+		status >>= NICVF_RBDR_FIFO_STATE_SHIFT;
+
+		if (status == RBDR_FIFO_STATE_FAIL)
+			nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
+		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
+	}
+
+	nicvf_disable_all_interrupts(nic);
+	abort();
+}
+
+/*
+ * Service the "mailbox" and "queue set error" interrupt sources that the
+ * poll mode driver cares about.
+ *
+ * Not re-entrant; the caller must serialize invocations.
+ *
+ * Returns the mailbox handler's result, or 0 when no mailbox interrupt
+ * was pending. Does not return if a queue-set error is found (the error
+ * handler aborts).
+ */
+int
+nicvf_reg_poll_interrupts(struct nicvf *nic)
+{
+	uint64_t cause;
+	int msg = 0;
+
+	cause = nicvf_reg_read(nic, NIC_VF_INT);
+
+	if (cause & NICVF_INTR_MBOX_MASK) {
+		/* Write the cause bit back to acknowledge it (looks like a
+		 * write-1-to-clear register -- confirm with the HRM) */
+		nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
+		msg = nicvf_handle_mbx_intr(nic);
+	}
+
+	if (cause & NICVF_INTR_QS_ERR_MASK) {
+		nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
+		nicvf_handle_qset_err_intr(nic);
+	}
+
+	return msg;
+}
+
+/*
+ * Poll a @bits-wide field at @bit_pos of queue register @offset until it
+ * reads @val, retrying up to NICVF_REG_POLL_ITER_NR times with a
+ * NICVF_REG_POLL_DELAY_US pause between attempts.
+ *
+ * Returns NICVF_OK on match, NICVF_ERR_REG_POLL on timeout.
+ */
+static int
+nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
+		    uint32_t bit_pos, uint32_t bits, uint64_t val)
+{
+	const uint64_t mask = ((1ULL << bits) - 1) << bit_pos;
+	int iter;
+
+	for (iter = 0; iter < NICVF_REG_POLL_ITER_NR; iter++) {
+		uint64_t reg = nicvf_queue_reg_read(nic, offset, qidx);
+
+		if (((reg & mask) >> bit_pos) == val)
+			return NICVF_OK;
+		nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
+	}
+	return NICVF_ERR_REG_POLL;
+}
+
+/*
+ * Quiesce and reset one RBDR ring so it can be reconfigured or torn
+ * down. Saves the live head/tail pointers into the nicvf_rbdr
+ * bookkeeping (when present) so the caller can free buffers still on
+ * the ring. Returns NICVF_OK or a NICVF_ERR_RBDR_* code on timeout.
+ */
+int
+nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+	uint64_t status;
+	int timeout = NICVF_REG_POLL_ITER_NR;
+	struct nicvf_rbdr *rbdr = nic->rbdr;
+
+	/* Save head and tail pointers for freeing up buffers */
+	if (rbdr) {
+		/* >> 3 converts the byte offset into a descriptor index
+		 * (8-byte descriptors) -- confirm against the HRM */
+		rbdr->head = nicvf_queue_reg_read(nic,
+				NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
+		rbdr->tail = nicvf_queue_reg_read(nic,
+				NIC_QSET_RBDR_0_1_TAIL,	qidx) >> 3;
+		rbdr->next_tail = rbdr->tail;
+	}
+
+	/* Reset RBDR */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
+				NICVF_RBDR_RESET);
+
+	/* Disable RBDR; bits 63:62 of STATUS0 are polled for the FIFO
+	 * state, 0x00 meaning disabled (field layout per HRM) */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
+	if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
+				62, 2, 0x00))
+		return NICVF_ERR_RBDR_DISABLE;
+
+	/* Wait for the prefetch FIFO to drain: the two 32-bit halves of
+	 * PRFCH_STATUS must become equal before the ring may be reset */
+	while (1) {
+		status = nicvf_queue_reg_read(nic,
+				NIC_QSET_RBDR_0_1_PRFCH_STATUS,	qidx);
+		if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
+			break;
+		nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
+		timeout--;
+		if (!timeout)
+			return NICVF_ERR_RBDR_PREFETCH;
+	}
+
+	/* Assert reset (expect state 0x02), then deassert (expect 0x00) */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
+			NICVF_RBDR_RESET);
+	if (nicvf_qset_poll_reg(nic, qidx,
+			NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
+		return NICVF_ERR_RBDR_RESET1;
+
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
+	if (nicvf_qset_poll_reg(nic, qidx,
+			NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+		return NICVF_ERR_RBDR_RESET2;
+
+	return NICVF_OK;
+}
+
+/*
+ * Convert a queue length into the hardware QSIZE encoding:
+ * log2(len) - len_shift.
+ *
+ * Uses an integer bit scan instead of libm's log2() so the result cannot
+ * be affected by floating-point rounding and the code does not depend on
+ * the FP unit. Callers guarantee @len is a power of two via the
+ * nicvf_qsize_*_roundup() helpers; the asserts bound the encoded value.
+ */
+static int
+nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
+{
+	uint32_t exp = 0;
+	int val;
+
+	assert(len != 0);
+	while (len >>= 1)
+		exp++;
+
+	val = (int)(exp - len_shift);
+	assert(val >= NICVF_QSIZE_MIN_VAL);
+	assert(val <= NICVF_QSIZE_MAX_VAL);
+	return val;
+}
+
+/*
+ * Reclaim, then (re)configure and enable one RBDR ring from the state in
+ * nic->rbdr (base address, length, buffer size).
+ * Returns NICVF_OK, a reclaim error, or NICVF_ERR_RBDR_RESET when the
+ * ring pointers did not come up zeroed.
+ */
+int
+nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
+{
+	int ret;
+	uint64_t head, tail;
+	struct nicvf_rbdr *rbdr = nic->rbdr;
+	struct rbdr_cfg rbdr_cfg = {.value = 0};
+
+	ret = nicvf_qset_rbdr_reclaim(nic, qidx);
+	if (ret)
+		return ret;
+
+	/* Set descriptor base address */
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);
+
+	/* Enable RBDR  & set queue size */
+	rbdr_cfg.ena = 1;
+	rbdr_cfg.reset = 0;
+	rbdr_cfg.ldwb = 0;
+	rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
+						RBDR_SIZE_SHIFT);
+	rbdr_cfg.avg_con = 0;
+	/* Buffer size is programmed in units of 128 bytes */
+	rbdr_cfg.lines = rbdr->buffsz / 128;
+
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);
+
+	/* Verify proper RBDR reset */
+	head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
+	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);
+
+	if (head | tail)
+		return NICVF_ERR_RBDR_RESET;
+
+	return NICVF_OK;
+}
+
+/* Round an RBDR length request up to the next hardware-supported size
+ * (0 if it exceeds the largest supported ring). */
+uint32_t
+nicvf_qsize_rbdr_roundup(uint32_t val)
+{
+	uint32_t sizes[] = {
+		RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K, RBDR_QUEUE_SZ_32K,
+		RBDR_QUEUE_SZ_64K, RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
+		RBDR_QUEUE_SZ_512K,
+	};
+
+	return nicvf_roundup_list(val, sizes, NICVF_ARRAY_SIZE(sizes));
+}
+
+/*
+ * Fill ring @ridx with up to @max_buffs receive buffers obtained from
+ * @handler(@opaque), then ring the doorbell to hand them to hardware.
+ * Stops early when the pool is exhausted (handler returns 0) or the
+ * ring is full. Always returns 0.
+ */
+int
+nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
+			  rbdr_pool_get_handler handler,
+			  void *opaque, uint32_t max_buffs)
+{
+	struct rbdr_entry_t *desc, *desc0;
+	struct nicvf_rbdr *rbdr = nic->rbdr;
+	uint32_t count;
+	nicvf_phys_addr_t phy;
+
+	assert(rbdr != NULL);
+	desc = rbdr->desc;
+	count = 0;
+	/* Don't fill beyond max numbers of desc */
+	while (count < rbdr->qlen_mask) {
+		if (count >= max_buffs)
+			break;
+		desc0 = desc + count;
+		phy = handler(opaque);
+		if (phy) {
+			desc0->full_addr = phy;
+			count++;
+		} else {
+			break;
+		}
+	}
+	/* Make descriptor writes visible before ringing the doorbell */
+	nicvf_smp_wmb();
+	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
+	/* >> 3: tail register is a byte offset, descriptors are 8 bytes */
+	rbdr->tail = nicvf_queue_reg_read(nic,
+				NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
+	rbdr->next_tail = rbdr->tail;
+	/* NOTE(review): rmb after the read looks intended to order the
+	 * tail read against later loads -- confirm barrier choice */
+	nicvf_smp_rmb();
+	return 0;
+}
+
+/* Read RBDR STATUS0 for @qidx; non-zero means the ring is active.
+ * NOTE(review): the 64-bit register is truncated to int here -- callers
+ * appear to use it only as a boolean, but confirm. */
+int
+nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
+{
+	return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
+}
+
+/*
+ * Disable and reset one send queue.
+ * If the SQ was enabled, waits for the hardware "stopped" bit before
+ * resetting. Returns 0, NICVF_ERR_SQ_DISABLE on stop timeout, or
+ * NICVF_ERR_SQ_RESET when head/tail did not clear.
+ */
+int
+nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+	uint64_t head, tail;
+	struct sq_cfg sq_cfg;
+
+	/* Snapshot CFG first so we know whether the SQ was enabled */
+	sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+
+	/* Disable send queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
+
+	/* Check if SQ is stopped */
+	if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
+				NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
+		return NICVF_ERR_SQ_DISABLE;
+
+	/* Reset send queue; >> 4 converts byte offsets to SQ entry
+	 * indices (16-byte entries) -- confirm against the HRM */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
+	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
+	if (head | tail)
+		return  NICVF_ERR_SQ_RESET;
+
+	return 0;
+}
+
+/*
+ * Reclaim, then configure and enable one send queue: PF mailbox setup,
+ * base address, size encoding, and a doorbell kick of zero to restart
+ * SQE processing. Returns 0 or a NICVF_ERR_SQ_* code.
+ */
+int
+nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
+{
+	int ret;
+	struct sq_cfg sq_cfg = {.value = 0};
+
+	ret = nicvf_qset_sq_reclaim(nic, qidx);
+	if (ret)
+		return ret;
+
+	/* Send a mailbox msg to PF to config SQ */
+	if (nicvf_mbox_sq_config(nic, qidx))
+		return  NICVF_ERR_SQ_PF_CFG;
+
+	/* Set queue base address */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);
+
+	/* Enable send queue  & set queue size */
+	sq_cfg.ena = 1;
+	sq_cfg.reset = 0;
+	sq_cfg.ldwb = 0;
+	sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
+	sq_cfg.tstmp_bgx_intf = 0;
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);
+
+	/* Ring doorbell so that H/W restarts processing SQEs */
+	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
+
+	return 0;
+}
+
+/* Round an SQ length request up to the next hardware-supported size
+ * (0 if it exceeds the largest supported queue). */
+uint32_t
+nicvf_qsize_sq_roundup(uint32_t val)
+{
+	uint32_t sizes[] = {
+		SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K, SND_QUEUE_SZ_4K,
+		SND_QUEUE_SZ_8K, SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
+		SND_QUEUE_SZ_64K,
+	};
+
+	return nicvf_roundup_list(val, sizes, NICVF_ARRAY_SIZE(sizes));
+}
+
+/* Disable one receive queue and synchronize with the PF so that no
+ * further packets are steered to it. Returns the mailbox sync result. */
+int
+nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+	/* Disable receive queue */
+	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
+	return nicvf_mbox_rq_sync(nic);
+}
+
+/*
+ * Reclaim, then configure and enable one receive queue: RQ-to-CQ/RBDR
+ * mapping via PF mailbox, backpressure and drop configuration, then the
+ * local enable bit. Returns 0 or a NICVF_ERR_RQ_* code.
+ */
+int
+nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
+{
+	struct pf_rq_cfg pf_rq_cfg = {.value = 0};
+	struct rq_cfg rq_cfg = {.value = 0};
+
+	if (nicvf_qset_rq_reclaim(nic, qidx))
+		return NICVF_ERR_RQ_CLAIM;
+
+	pf_rq_cfg.strip_pre_l2 = 0;
+	/* First cache line of RBDR data will be allocated into L2C */
+	pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
+	/* RQ, CQ and RBDR all live in this VF's own queue set */
+	pf_rq_cfg.cq_qs = nic->vf_id;
+	pf_rq_cfg.cq_idx = qidx;
+	pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
+	pf_rq_cfg.rbdr_cont_idx = 0;
+	pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
+	pf_rq_cfg.rbdr_strt_idx = 0;
+
+	/* Send a mailbox msg to PF to config RQ */
+	if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
+		return NICVF_ERR_RQ_PF_CFG;
+
+	/* Select Rx backpressure */
+	if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
+		return NICVF_ERR_RQ_BP_CFG;
+
+	/* Send a mailbox msg to PF to config RQ drop */
+	if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
+		return NICVF_ERR_RQ_DROP_CFG;
+
+	/* Enable Receive queue */
+	rq_cfg.ena = 1;
+	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);
+
+	return 0;
+}
+
+/*
+ * Disable and reset one completion queue.
+ * Returns 0, NICVF_ERR_CQ_DISABLE on poll timeout, or
+ * NICVF_ERR_CQ_RESET when head/tail did not clear.
+ */
+int
+nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
+{
+	uint64_t tail, head;
+
+	/* Disable completion queue, then wait for CFG bit 42 to clear
+	 * (presumably the enable/busy indicator -- confirm with the HRM) */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
+	if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
+		return NICVF_ERR_CQ_DISABLE;
+
+	/* Reset completion queue; >> 9 converts byte offsets to CQ entry
+	 * indices (512-byte entries) -- confirm against the HRM */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+	tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
+	head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
+	if (head | tail)
+		return  NICVF_ERR_CQ_RESET;
+
+	/* Disable timer threshold (doesn't get reset upon CQ reset) */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+	return 0;
+}
+
+/*
+ * Reclaim, then configure and enable one completion queue: base address,
+ * size encoding, caching policy, and zeroed interrupt thresholds.
+ * Returns 0 or a NICVF_ERR_CQ_* code from the reclaim step.
+ */
+int
+nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
+{
+	int ret;
+	struct cq_cfg cq_cfg = {.value = 0};
+
+	ret = nicvf_qset_cq_reclaim(nic, qidx);
+	if (ret)
+		return ret;
+
+	/* Set completion queue base address */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);
+
+	cq_cfg.ena = 1;
+	cq_cfg.reset = 0;
+	/* Writes of CQE will be allocated into L2C */
+	cq_cfg.caching = 1;
+	cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
+	cq_cfg.avg_con = 0;
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);
+
+	/* Set threshold value for interrupt generation */
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
+	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+	return 0;
+}
+
+/* Round a CQ length request up to the next hardware-supported size
+ * (0 if it exceeds the largest supported queue). */
+uint32_t
+nicvf_qsize_cq_roundup(uint32_t val)
+{
+	uint32_t sizes[] = {
+		CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K, CMP_QUEUE_SZ_4K,
+		CMP_QUEUE_SZ_8K, CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
+		CMP_QUEUE_SZ_64K,
+	};
+
+	return nicvf_roundup_list(val, sizes, NICVF_ARRAY_SIZE(sizes));
+}
+
+
+/*
+ * Enable or disable hardware VLAN stripping on received packets.
+ * The strip controls occupy a field at bit 25 of NIC_VNIC_RQ_GEN_CFG
+ * (STRIP_FIRST_VLAN / STRIP_SECOND_VLAN are field values shifted in) --
+ * confirm field layout against the HRM.
+ */
+void
+nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
+{
+	uint64_t val;
+
+	val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
+	if (enable)
+		val |= (STRIP_FIRST_VLAN << 25);
+	else
+		/* Clear both strip options when disabling */
+		val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
+
+	nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
+}
+
+/*
+ * Enable/disable loopback mode via the PF mailbox.
+ * Enabling fails with NICVF_ERR_LOOPBACK_CFG when the PF has not
+ * advertised loopback support for this VF.
+ */
+int
+nicvf_loopback_config(struct nicvf *nic, bool enable)
+{
+	if (enable && !nic->loopback_supported)
+		return NICVF_ERR_LOOPBACK_CFG;
+
+	return nicvf_mbox_loopback_config(nic, enable);
+}
diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h
new file mode 100644
index 0000000..dc9f4f1
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_hw.h
@@ -0,0 +1,176 @@
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright (C) Cavium networks Ltd. 2016.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Cavium networks nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _THUNDERX_NICVF_HW_H
+#define _THUNDERX_NICVF_HW_H
+
+#include <stdint.h>
+
+#include "nicvf_hw_defs.h"
+
+/* PCI IDs of the ThunderX NICVF. The subsystem device id is what
+ * distinguishes pass1 from pass2 silicon (see NICVF_PASS* below). */
+#define	PCI_VENDOR_ID_CAVIUM			0x177D
+#define	PCI_DEVICE_ID_THUNDERX_PASS1_NICVF	0x0011
+#define	PCI_DEVICE_ID_THUNDERX_PASS2_NICVF	0xA034
+#define	PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF	0xA11E
+#define	PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF	0xA134
+
+/* Element count of a true array (do not use on pointers) */
+#define NICVF_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+/* Hardware pass identifiers, compared against nicvf_hw_version() */
+#define NICVF_PASS1	(PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF)
+#define NICVF_PASS2	(PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF)
+
+/* Capability bits stored in nic->hwcap by nicvf_base_init() */
+#define NICVF_CAP_TUNNEL_PARSING          (1ULL << 0)
+
+/* TNS (traffic node switch) operating mode requested from the PF */
+enum nicvf_tns_mode {
+	NIC_TNS_BYPASS_MODE,
+	NIC_TNS_MODE,
+};
+
+/* Error codes returned by the base layer. All are negative (starting
+ * from -8191) so they cannot collide with valid non-negative returns. */
+enum nicvf_err_e {
+	NICVF_OK,
+	NICVF_ERR_SET_QS = -8191,/* -8191 */
+	NICVF_ERR_RESET_QS,      /* -8190 */
+	NICVF_ERR_REG_POLL,      /* -8189 */
+	NICVF_ERR_RBDR_RESET,    /* -8188 */
+	NICVF_ERR_RBDR_DISABLE,  /* -8187 */
+	NICVF_ERR_RBDR_PREFETCH, /* -8186 */
+	NICVF_ERR_RBDR_RESET1,   /* -8185 */
+	NICVF_ERR_RBDR_RESET2,   /* -8184 */
+	NICVF_ERR_RQ_CLAIM,      /* -8183 */
+	NICVF_ERR_RQ_PF_CFG,	 /* -8182 */
+	NICVF_ERR_RQ_BP_CFG,	 /* -8181 */
+	NICVF_ERR_RQ_DROP_CFG,	 /* -8180 */
+	NICVF_ERR_CQ_DISABLE,	 /* -8179 */
+	NICVF_ERR_CQ_RESET,	 /* -8178 */
+	NICVF_ERR_SQ_DISABLE,	 /* -8177 */
+	NICVF_ERR_SQ_RESET,	 /* -8176 */
+	NICVF_ERR_SQ_PF_CFG,	 /* -8175 */
+	NICVF_ERR_LOOPBACK_CFG,  /* -8174 */
+	NICVF_ERR_BASE_INIT,     /* -8173 */
+};
+
+/* Callback used by nicvf_qset_rbdr_precharge() to fetch the physical
+ * address of one receive buffer from the caller's pool; returning 0
+ * signals pool exhaustion. */
+typedef nicvf_phys_addr_t (*rbdr_pool_get_handler)(void *opaque);
+
+/* Common structs used in DPDK and base layer are defined in DPDK layer */
+#include "../nicvf_struct.h"
+
+/* Keep the fast-path queue structures within a 128-byte bound */
+NICVF_STATIC_ASSERT(sizeof(struct nicvf_rbdr) <= 128);
+NICVF_STATIC_ASSERT(sizeof(struct nicvf_txq) <= 128);
+NICVF_STATIC_ASSERT(sizeof(struct nicvf_rxq) <= 128);
+
+/* Write a 64-bit value to a VF register at byte @offset */
+static inline void
+nicvf_reg_write(struct nicvf *nic, uint32_t offset, uint64_t val)
+{
+	nicvf_addr_write(nic->reg_base + offset, val);
+}
+
+/* Read a 64-bit VF register at byte @offset */
+static inline uint64_t
+nicvf_reg_read(struct nicvf *nic, uint32_t offset)
+{
+	return nicvf_addr_read(nic->reg_base + offset);
+}
+
+/* Base address of the register window of queue @qidx (queues are laid
+ * out at a fixed stride of 1 << NIC_Q_NUM_SHIFT bytes) */
+static inline uintptr_t
+nicvf_qset_base(struct nicvf *nic, uint32_t qidx)
+{
+	return nic->reg_base + (qidx << NIC_Q_NUM_SHIFT);
+}
+
+/* Write a per-queue register of queue @qidx */
+static inline void
+nicvf_queue_reg_write(struct nicvf *nic, uint32_t offset, uint32_t qidx,
+		      uint64_t val)
+{
+	nicvf_addr_write(nicvf_qset_base(nic, qidx) + offset, val);
+}
+
+/* Read a per-queue register of queue @qidx */
+static inline uint64_t
+nicvf_queue_reg_read(struct nicvf *nic, uint32_t offset, uint32_t qidx)
+{
+	return	nicvf_addr_read(nicvf_qset_base(nic, qidx) + offset);
+}
+
+/* Mask every VF interrupt source (ENA_W1C write-1-to-clear) and clear
+ * any pending cause bits */
+static inline void
+nicvf_disable_all_interrupts(struct nicvf *nic)
+{
+	nicvf_reg_write(nic, NIC_VF_ENA_W1C, NICVF_INTR_ALL_MASK);
+	nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_ALL_MASK);
+}
+
+/* Hardware revision, expressed as the PCI subsystem device id
+ * (compare against NICVF_PASS1 / NICVF_PASS2) */
+static inline uint32_t
+nicvf_hw_version(struct nicvf *nic)
+{
+	return nic->subsystem_device_id;
+}
+
+/* Capability bit mask populated by nicvf_base_init() */
+static inline uint64_t
+nicvf_hw_cap(struct nicvf *nic)
+{
+	return nic->hwcap;
+}
+
+/* Capability detection */
+int nicvf_base_init(struct nicvf *nic);
+
+/* Register dump / interrupt polling */
+int nicvf_reg_get_count(void);
+int nicvf_reg_poll_interrupts(struct nicvf *nic);
+int nicvf_reg_dump(struct nicvf *nic, uint64_t *data);
+
+/* Queue set enable/disable via PF mailbox */
+int nicvf_qset_config(struct nicvf *nic);
+int nicvf_qset_reclaim(struct nicvf *nic);
+
+/* Receive buffer descriptor ring management */
+int nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx);
+int nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx);
+int nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
+			      rbdr_pool_get_handler handler, void *opaque,
+			      uint32_t max_buffs);
+int nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx);
+
+/* Receive / completion / send queue setup and teardown */
+int nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx,
+			 struct nicvf_rxq *rxq);
+int nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx);
+
+int nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx,
+			 struct nicvf_rxq *rxq);
+int nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx);
+
+int nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx,
+			 struct nicvf_txq *txq);
+int nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx);
+
+/* Round a requested ring length up to a hardware-supported size */
+uint32_t nicvf_qsize_rbdr_roundup(uint32_t val);
+uint32_t nicvf_qsize_cq_roundup(uint32_t val);
+uint32_t nicvf_qsize_sq_roundup(uint32_t val);
+
+/* Misc device controls */
+void nicvf_vlan_hw_strip(struct nicvf *nic, bool enable);
+
+int nicvf_loopback_config(struct nicvf *nic, bool enable);
+
+#endif /* _THUNDERX_NICVF_HW_H */
diff --git a/drivers/net/thunderx/base/nicvf_plat.h b/drivers/net/thunderx/base/nicvf_plat.h
index fbf28ce..83c1844 100644
--- a/drivers/net/thunderx/base/nicvf_plat.h
+++ b/drivers/net/thunderx/base/nicvf_plat.h
@@ -126,6 +126,7 @@ do {							\
 
 #endif
 
+#include "nicvf_hw.h"
 #include "nicvf_mbox.h"
 
 #endif /* _THUNDERX_NICVF_H */
-- 
2.5.5

  parent reply	other threads:[~2016-06-17 13:30 UTC|newest]

Thread overview: 204+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-05-07 15:16 [dpdk-dev] [PATCH 00/20] DPDK PMD for ThunderX NIC device Jerin Jacob
2016-05-07 15:16 ` [dpdk-dev] [PATCH 01/20] thunderx/nicvf/base: add hardware API for ThunderX nicvf inbuilt NIC Jerin Jacob
2016-05-09 17:38   ` Stephen Hemminger
2016-05-12 15:40   ` Pattan, Reshma
2016-05-07 15:16 ` [dpdk-dev] [PATCH 02/20] thunderx/nicvf: add pmd skeleton Jerin Jacob
2016-05-09 17:40   ` Stephen Hemminger
2016-05-09 17:41   ` Stephen Hemminger
2016-05-10  7:25     ` Jerin Jacob
2016-05-11  5:37   ` Panu Matilainen
2016-05-11 12:23   ` Pattan, Reshma
2016-05-07 15:16 ` [dpdk-dev] [PATCH 03/20] thunderx/nicvf: add link status and link update support Jerin Jacob
2016-06-08 16:10   ` Ferruh Yigit
2016-05-07 15:16 ` [dpdk-dev] [PATCH 04/20] thunderx/nicvf: add get_reg and get_reg_length support Jerin Jacob
2016-05-12 15:39   ` Pattan, Reshma
2016-05-13  8:14     ` Jerin Jacob
2016-05-07 15:16 ` [dpdk-dev] [PATCH 05/20] thunderx/nicvf: add dev_configure support Jerin Jacob
2016-05-07 15:16 ` [dpdk-dev] [PATCH 06/20] thunderx/nicvf: add dev_infos_get support Jerin Jacob
2016-05-13 13:52   ` Pattan, Reshma
2016-05-07 15:16 ` [dpdk-dev] [PATCH 07/20] thunderx/nicvf: add rx_queue_setup/release support Jerin Jacob
2016-05-19  9:30   ` Pattan, Reshma
2016-05-07 15:16 ` [dpdk-dev] [PATCH 08/20] thunderx/nicvf: add tx_queue_setup/release support Jerin Jacob
2016-05-19 12:19   ` Pattan, Reshma
2016-05-07 15:16 ` [dpdk-dev] [PATCH 09/20] thunderx/nicvf: add rss and reta query and update support Jerin Jacob
2016-05-07 15:16 ` [dpdk-dev] [PATCH 10/20] thunderx/nicvf: add mtu_set and promiscuous_enable support Jerin Jacob
2016-05-07 15:16 ` [dpdk-dev] [PATCH 11/20] thunderx/nicvf: add stats support Jerin Jacob
2016-05-07 15:16 ` [dpdk-dev] [PATCH 12/20] thunderx/nicvf: add single and multi segment tx functions Jerin Jacob
2016-05-07 15:16 ` [dpdk-dev] [PATCH 13/20] thunderx/nicvf: add single and multi segment rx functions Jerin Jacob
2016-05-07 15:16 ` [dpdk-dev] [PATCH 14/20] thunderx/nicvf: add dev_supported_ptypes_get and rx_queue_count support Jerin Jacob
2016-05-07 15:16 ` [dpdk-dev] [PATCH 15/20] thunderx/nicvf: add rx queue start and stop support Jerin Jacob
2016-05-07 15:16 ` [dpdk-dev] [PATCH 16/20] thunderx/nicvf: add tx " Jerin Jacob
2016-05-07 15:16 ` [dpdk-dev] [PATCH 17/20] thunderx/nicvf: add device start, stop and close support Jerin Jacob
2016-05-07 15:16 ` [dpdk-dev] [PATCH 18/20] thunderx/config: set max numa node to two Jerin Jacob
2016-05-07 15:16 ` [dpdk-dev] [PATCH 19/20] thunderx/nicvf: updated driver documentation and release notes Jerin Jacob
2016-05-09  8:47   ` Thomas Monjalon
2016-05-09  9:35     ` Jerin Jacob
2016-05-17 16:31   ` Mcnamara, John
2016-05-19  6:19     ` Jerin Jacob
2016-05-07 15:16 ` [dpdk-dev] [PATCH 20/20] maintainers: claim responsibility for the ThunderX nicvf PMD Jerin Jacob
2016-05-09  8:50   ` Thomas Monjalon
2016-05-29 16:46 ` [dpdk-dev] [PATCH v2 00/20] DPDK PMD for ThunderX NIC device Jerin Jacob
2016-05-29 16:46   ` [dpdk-dev] [PATCH v2 01/20] thunderx/nicvf/base: add hardware API for ThunderX nicvf inbuilt NIC Jerin Jacob
2016-05-29 16:46   ` [dpdk-dev] [PATCH v2 02/20] thunderx/nicvf: add pmd skeleton Jerin Jacob
2016-05-31 16:53     ` Stephen Hemminger
2016-06-01  9:14       ` Jerin Jacob
2016-05-29 16:46   ` [dpdk-dev] [PATCH v2 03/20] thunderx/nicvf: add link status and link update support Jerin Jacob
2016-05-29 16:46   ` [dpdk-dev] [PATCH v2 04/20] thunderx/nicvf: add get_reg and get_reg_length support Jerin Jacob
2016-05-29 16:46   ` [dpdk-dev] [PATCH v2 05/20] thunderx/nicvf: add dev_configure support Jerin Jacob
2016-05-29 16:46   ` [dpdk-dev] [PATCH v2 06/20] thunderx/nicvf: add dev_infos_get support Jerin Jacob
2016-05-29 16:46   ` [dpdk-dev] [PATCH v2 07/20] thunderx/nicvf: add rx_queue_setup/release support Jerin Jacob
2016-05-29 16:46   ` [dpdk-dev] [PATCH v2 08/20] thunderx/nicvf: add tx_queue_setup/release support Jerin Jacob
2016-05-29 16:46   ` [dpdk-dev] [PATCH v2 09/20] thunderx/nicvf: add rss and reta query and update support Jerin Jacob
2016-06-07 16:40   ` [dpdk-dev] [PATCH v3 00/20] DPDK PMD for ThunderX NIC device Jerin Jacob
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 01/20] thunderx/nicvf/base: add hardware API for ThunderX nicvf inbuilt NIC Jerin Jacob
2016-06-08 12:18       ` Ferruh Yigit
2016-06-08 15:45       ` Ferruh Yigit
2016-06-13 13:55       ` [dpdk-dev] [PATCH v4 00/19] DPDK PMD for ThunderX NIC device Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 01/19] net/thunderx/base: add hardware API for ThunderX nicvf inbuilt NIC Jerin Jacob
2016-06-13 15:09           ` Bruce Richardson
2016-06-14 13:52             ` Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 02/19] net/thunderx: add pmd skeleton Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 03/19] net/thunderx: add link status and link update support Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 04/19] net/thunderx: add get_reg and get_reg_length support Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 05/19] net/thunderx: add dev_configure support Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 06/19] net/thunderx: add dev_infos_get support Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 07/19] net/thunderx: add rx_queue_setup/release support Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 08/19] net/thunderx: add tx_queue_setup/release support Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 09/19] net/thunderx: add rss and reta query and update support Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 10/19] net/thunderx: add mtu_set and promiscuous_enable support Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 11/19] net/thunderx: add stats support Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 12/19] net/thunderx: add single and multi segment tx functions Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 13/19] net/thunderx: add single and multi segment rx functions Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 14/19] net/thunderx: add dev_supported_ptypes_get and rx_queue_count support Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 15/19] net/thunderx: add rx queue start and stop support Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 16/19] net/thunderx: add tx " Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 17/19] net/thunderx: add device start, stop and close support Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 18/19] net/thunderx: updated driver documentation and release notes Jerin Jacob
2016-06-13 13:55         ` [dpdk-dev] [PATCH v4 19/19] maintainers: claim responsibility for the ThunderX nicvf PMD Jerin Jacob
2016-06-13 15:46         ` [dpdk-dev] [PATCH v4 00/19] DPDK PMD for ThunderX NIC device Bruce Richardson
2016-06-14 19:06         ` [dpdk-dev] [PATCH v5 00/25] " Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 01/25] net/thunderx/base: add HW constants for ThunderX inbuilt NIC Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 02/25] net/thunderx/base: add register definition " Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 03/25] net/thunderx/base: implement DPDK based platform abstraction for base code Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 04/25] net/thunderx/base: add mbox API for ThunderX PF/VF driver communication Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 05/25] net/thunderx/base: add hardware API for ThunderX nicvf inbuilt NIC Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 06/25] net/thunderx/base: add RSS and reta configuration HW APIs Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 07/25] net/thunderx/base: add statistics get " Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 08/25] net/thunderx: add pmd skeleton Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 09/25] net/thunderx: add link status and link update support Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 10/25] net/thunderx: add registers dump support Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 11/25] net/thunderx: add ethdev configure support Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 12/25] net/thunderx: add get device info support Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 13/25] net/thunderx: add Rx queue setup and release support Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 14/25] net/thunderx: add Tx " Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 15/25] net/thunderx: add RSS and reta query and update support Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 16/25] net/thunderx: add MTU set and promiscuous enable support Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 17/25] net/thunderx: add stats support Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 18/25] net/thunderx: add single and multi segment Tx functions Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 19/25] net/thunderx: add single and multi segment Rx functions Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 20/25] net/thunderx: implement supported ptype get and Rx queue count Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 21/25] net/thunderx: add Rx queue start and stop support Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 22/25] net/thunderx: add Tx " Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 23/25] net/thunderx: add device start, stop and close support Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 24/25] net/thunderx: updated driver documentation and release notes Jerin Jacob
2016-06-14 19:06           ` [dpdk-dev] [PATCH v5 25/25] maintainers: claim responsibility for the ThunderX nicvf PMD Jerin Jacob
2016-06-15 14:39           ` [dpdk-dev] [PATCH v5 00/25] DPDK PMD for ThunderX NIC device Bruce Richardson
2016-06-16  9:31             ` Jerin Jacob
2016-06-16 10:58               ` Bruce Richardson
2016-06-16 11:17                 ` Jerin Jacob
2016-06-16 14:33                   ` Bruce Richardson
2016-06-17 13:29           ` [dpdk-dev] [PATCH v6 00/27] " Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 01/27] net/thunderx/base: add HW constants Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 02/27] net/thunderx/base: add HW register definitions Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 03/27] net/thunderx/base: implement DPDK based platform abstraction Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 04/27] net/thunderx/base: add mbox APIs for PF/VF communication Jerin Jacob
2016-06-21 13:41               ` Ferruh Yigit
2016-06-17 13:29             ` Jerin Jacob [this message]
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 06/27] net/thunderx/base: add RSS and reta configuration HW APIs Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 07/27] net/thunderx/base: add statistics get " Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 08/27] net/thunderx: add pmd skeleton Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 09/27] net/thunderx: add link status and link update support Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 10/27] net/thunderx: add registers dump support Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 11/27] net/thunderx: add ethdev configure support Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 12/27] net/thunderx: add get device info support Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 13/27] net/thunderx: add Rx queue setup and release support Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 14/27] net/thunderx: add Tx " Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 15/27] net/thunderx: add RSS and reta query and update support Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 16/27] net/thunderx: add MTU set support Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 17/27] net/thunderx: add promiscuous enable support Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 18/27] net/thunderx: add stats support Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 19/27] net/thunderx: add single and multi segment Tx functions Jerin Jacob
2016-06-21 13:34               ` Ferruh Yigit
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 20/27] net/thunderx: add single and multi segment Rx functions Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 21/27] net/thunderx: add supported packet type get Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 22/27] net/thunderx: add Rx queue count support Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 23/27] net/thunderx: add Rx queue start and stop support Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 24/27] net/thunderx: add Tx " Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 25/27] net/thunderx: add device start, stop and close support Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 26/27] net/thunderx: updated driver documentation and release notes Jerin Jacob
2016-06-17 13:29             ` [dpdk-dev] [PATCH v6 27/27] maintainers: claim responsibility for the ThunderX nicvf PMD Jerin Jacob
2016-06-20 11:23               ` Bruce Richardson
2016-06-20 11:28             ` [dpdk-dev] [PATCH v6 00/27] DPDK PMD for ThunderX NIC device Bruce Richardson
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 02/20] thunderx/nicvf: add pmd skeleton Jerin Jacob
2016-06-08 12:18       ` Ferruh Yigit
2016-06-08 16:06       ` Ferruh Yigit
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 03/20] thunderx/nicvf: add link status and link update support Jerin Jacob
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 04/20] thunderx/nicvf: add get_reg and get_reg_length support Jerin Jacob
2016-06-08 16:16       ` Ferruh Yigit
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 05/20] thunderx/nicvf: add dev_configure support Jerin Jacob
2016-06-08 16:21       ` Ferruh Yigit
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 06/20] thunderx/nicvf: add dev_infos_get support Jerin Jacob
2016-06-08 16:23       ` Ferruh Yigit
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 07/20] thunderx/nicvf: add rx_queue_setup/release support Jerin Jacob
2016-06-08 16:42       ` Ferruh Yigit
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 08/20] thunderx/nicvf: add tx_queue_setup/release support Jerin Jacob
2016-06-08 12:24       ` Ferruh Yigit
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 09/20] thunderx/nicvf: add rss and reta query and update support Jerin Jacob
2016-06-08 16:45       ` Ferruh Yigit
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 10/20] thunderx/nicvf: add mtu_set and promiscuous_enable support Jerin Jacob
2016-06-08 16:48       ` Ferruh Yigit
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 11/20] thunderx/nicvf: add stats support Jerin Jacob
2016-06-08 16:53       ` Ferruh Yigit
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 12/20] thunderx/nicvf: add single and multi segment tx functions Jerin Jacob
2016-06-08 12:11       ` Ferruh Yigit
2016-06-08 12:51       ` Ferruh Yigit
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 13/20] thunderx/nicvf: add single and multi segment rx functions Jerin Jacob
2016-06-08 17:04       ` Ferruh Yigit
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 14/20] thunderx/nicvf: add dev_supported_ptypes_get and rx_queue_count support Jerin Jacob
2016-06-08 17:17       ` Ferruh Yigit
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 15/20] thunderx/nicvf: add rx queue start and stop support Jerin Jacob
2016-06-08 17:42       ` Ferruh Yigit
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 16/20] thunderx/nicvf: add tx " Jerin Jacob
2016-06-08 17:46       ` Ferruh Yigit
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 17/20] thunderx/nicvf: add device start, stop and close support Jerin Jacob
2016-06-08 12:25       ` Ferruh Yigit
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 18/20] thunderx/config: set max numa node to two Jerin Jacob
2016-06-08 17:54       ` Ferruh Yigit
2016-06-13 13:11         ` Jerin Jacob
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 19/20] thunderx/nicvf: updated driver documentation and release notes Jerin Jacob
2016-06-08 12:08       ` Ferruh Yigit
2016-06-08 12:27         ` Jerin Jacob
2016-06-08 13:18           ` Bruce Richardson
2016-06-07 16:40     ` [dpdk-dev] [PATCH v3 20/20] maintainers: claim responsibility for the ThunderX nicvf PMD Jerin Jacob
2016-06-08 12:30     ` [dpdk-dev] [PATCH v3 00/20] DPDK PMD for ThunderX NIC device Ferruh Yigit
2016-06-08 12:43       ` Jerin Jacob
2016-06-08 13:15         ` Ferruh Yigit
2016-06-08 13:22         ` Bruce Richardson
2016-06-08 13:32           ` Jerin Jacob
2016-06-08 13:51             ` Thomas Monjalon
2016-06-08 13:42         ` Thomas Monjalon
2016-06-08 15:08           ` Bruce Richardson
2016-06-09 10:49             ` Jerin Jacob
2016-06-09 14:02               ` Thomas Monjalon
2016-06-09 14:11                 ` Bruce Richardson
2016-05-29 16:53 ` [dpdk-dev] [PATCH v2 10/20] thunderx/nicvf: add mtu_set and promiscuous_enable support Jerin Jacob
2016-05-29 16:54 ` [dpdk-dev] [PATCH v2 11/20] thunderx/nicvf: add stats support Jerin Jacob
2016-05-29 16:54   ` [dpdk-dev] [PATCH v2 12/20] thunderx/nicvf: add single and multi segment tx functions Jerin Jacob
2016-05-29 16:54   ` [dpdk-dev] [PATCH v2 13/20] thunderx/nicvf: add single and multi segment rx functions Jerin Jacob
2016-05-29 16:54   ` [dpdk-dev] [PATCH v2 14/20] thunderx/nicvf: add dev_supported_ptypes_get and rx_queue_count support Jerin Jacob
2016-05-29 16:54   ` [dpdk-dev] [PATCH v2 15/20] thunderx/nicvf: add rx queue start and stop support Jerin Jacob
2016-05-29 16:54   ` [dpdk-dev] [PATCH v2 16/20] thunderx/nicvf: add tx " Jerin Jacob
2016-05-29 16:57 ` [dpdk-dev] [PATCH v2 17/20] thunderx/nicvf: add device start, stop and close support Jerin Jacob
2016-05-29 16:57   ` [dpdk-dev] [PATCH v2 18/20] thunderx/config: set max numa node to two Jerin Jacob
2016-05-29 16:57   ` [dpdk-dev] [PATCH v2 19/20] thunderx/nicvf: updated driver documentation and release notes Jerin Jacob
2016-05-29 16:57   ` [dpdk-dev] [PATCH v2 20/20] maintainers: claim responsibility for the ThunderX nicvf PMD Jerin Jacob

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1466170194-28393-6-git-send-email-jerin.jacob@caviumnetworks.com \
    --to=jerin.jacob@caviumnetworks.com \
    --cc=Kamil.Rytarowski@caviumnetworks.com \
    --cc=bruce.richardson@intel.com \
    --cc=dev@dpdk.org \
    --cc=ferruh.yigit@intel.com \
    --cc=maciej.czekaj@caviumnetworks.com \
    --cc=rad@semihalf.com \
    --cc=slawomir.rosek@semihalf.com \
    --cc=thomas.monjalon@6wind.com \
    --cc=zyta.szpak@semihalf.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).