From: Zhihong Wang <zhihong.wang@intel.com>
To: dev@dpdk.org
Cc: jianfeng.tan@intel.com, tiwei.bie@intel.com,
maxime.coquelin@redhat.com, yliu@fridaylinux.org,
cunming.liang@intel.com, xiao.w.wang@intel.com,
dan.daly@intel.com, Zhihong Wang <zhihong.wang@intel.com>
Subject: [dpdk-dev] [PATCH v5 4/5] vhost: adapt vhost lib for selective datapath
Date: Mon, 2 Apr 2018 19:46:55 +0800 [thread overview]
Message-ID: <20180402114656.17090-5-zhihong.wang@intel.com> (raw)
In-Reply-To: <20180402114656.17090-1-zhihong.wang@intel.com>
This patch adapts vhost lib for selective datapath by calling device ops
at the corresponding stage.
Signed-off-by: Zhihong Wang <zhihong.wang@intel.com>
---
Changes in v5:
1. Improve sanity check.
---
Changes in v4:
1. Remove the "engine" concept in the lib.
---
Changes in v2:
1. Ensure negotiated capabilities are supported in vhost-user lib.
2. Configure the data path at the right time.
lib/librte_vhost/rte_vhost.h | 27 ++++++++
lib/librte_vhost/rte_vhost_version.map | 2 +
lib/librte_vhost/socket.c | 113 +++++++++++++++++++++++++++++++--
lib/librte_vhost/vhost.c | 6 ++
lib/librte_vhost/vhost.h | 2 +
lib/librte_vhost/vhost_user.c | 70 ++++++++++++++++++--
6 files changed, 211 insertions(+), 9 deletions(-)
diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index 8f35167f2..fe0338d00 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -290,6 +290,33 @@ int rte_vhost_driver_disable_features(const char *path, uint64_t features);
int rte_vhost_driver_get_features(const char *path, uint64_t *features);
/**
+ * Get the protocol feature bits before feature negotiation.
+ *
+ * @param path
+ * The vhost-user socket file path
+ * @param protocol_features
+ * A pointer to store the queried protocol feature bits
+ * @return
+ * 0 on success, -1 on failure
+ */
+int __rte_experimental
+rte_vhost_driver_get_protocol_features(const char *path,
+ uint64_t *protocol_features);
+
+/**
+ * Get the queue number bits before feature negotiation.
+ *
+ * @param path
+ * The vhost-user socket file path
+ * @param queue_num
+ * A pointer to store the queried queue number bits
+ * @return
+ * 0 on success, -1 on failure
+ */
+int __rte_experimental
+rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num);
+
+/**
* Get the feature bits after negotiation
*
* @param vid
diff --git a/lib/librte_vhost/rte_vhost_version.map b/lib/librte_vhost/rte_vhost_version.map
index e30285d7f..55e0af7e7 100644
--- a/lib/librte_vhost/rte_vhost_version.map
+++ b/lib/librte_vhost/rte_vhost_version.map
@@ -69,4 +69,6 @@ EXPERIMENTAL {
rte_vhost_driver_detach_vdpa_device;
rte_vhost_driver_get_vdpa_device_id;
rte_vhost_get_vdpa_device_id;
+ rte_vhost_driver_get_protocol_features;
+ rte_vhost_driver_get_queue_num;
} DPDK_18.02;
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index c26940f7a..9a44f0d9e 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -216,6 +216,8 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
vhost_set_builtin_virtio_net(vid, vsocket->use_builtin_virtio_net);
+ vhost_attach_vdpa_device(vid, vsocket->vdpa_dev_id);
+
if (vsocket->dequeue_zero_copy)
vhost_enable_dequeue_zero_copy(vid);
@@ -665,20 +667,123 @@ int
rte_vhost_driver_get_features(const char *path, uint64_t *features)
{
struct vhost_user_socket *vsocket;
+ uint64_t vdpa_features;
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
+ int ret = 0;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
- if (vsocket)
+ if (!vsocket) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "socket file %s is not registered yet.\n", path);
+ ret = -1;
+ goto unlock_exit;
+ }
+
+ did = vsocket->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (!vdpa_dev || !vdpa_dev->ops->get_features) {
*features = vsocket->features;
+ goto unlock_exit;
+ }
+
+ if (vdpa_dev->ops->get_features(did, &vdpa_features) < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to get vdpa features "
+ "for socket file %s.\n", path);
+ ret = -1;
+ goto unlock_exit;
+ }
+
+ *features = vsocket->features & vdpa_features;
+
+unlock_exit:
pthread_mutex_unlock(&vhost_user.mutex);
+ return ret;
+}
+int
+rte_vhost_driver_get_protocol_features(const char *path,
+ uint64_t *protocol_features)
+{
+ struct vhost_user_socket *vsocket;
+ uint64_t vdpa_protocol_features;
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
+ int ret = 0;
+
+ pthread_mutex_lock(&vhost_user.mutex);
+ vsocket = find_vhost_user_socket(path);
if (!vsocket) {
RTE_LOG(ERR, VHOST_CONFIG,
"socket file %s is not registered yet.\n", path);
- return -1;
- } else {
- return 0;
+ ret = -1;
+ goto unlock_exit;
}
+
+ did = vsocket->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (!vdpa_dev || !vdpa_dev->ops->get_protocol_features) {
+ *protocol_features = VHOST_USER_PROTOCOL_FEATURES;
+ goto unlock_exit;
+ }
+
+ if (vdpa_dev->ops->get_protocol_features(did,
+ &vdpa_protocol_features) < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to get vdpa protocol features "
+ "for socket file %s.\n", path);
+ ret = -1;
+ goto unlock_exit;
+ }
+
+ *protocol_features = VHOST_USER_PROTOCOL_FEATURES
+ & vdpa_protocol_features;
+
+unlock_exit:
+ pthread_mutex_unlock(&vhost_user.mutex);
+ return ret;
+}
+
+int
+rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
+{
+ struct vhost_user_socket *vsocket;
+ uint32_t vdpa_queue_num;
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
+ int ret = 0;
+
+ pthread_mutex_lock(&vhost_user.mutex);
+ vsocket = find_vhost_user_socket(path);
+ if (!vsocket) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "socket file %s is not registered yet.\n", path);
+ ret = -1;
+ goto unlock_exit;
+ }
+
+ did = vsocket->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (!vdpa_dev || !vdpa_dev->ops->get_queue_num) {
+ *queue_num = VHOST_MAX_QUEUE_PAIRS;
+ goto unlock_exit;
+ }
+
+ if (vdpa_dev->ops->get_queue_num(did, &vdpa_queue_num) < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to get vdpa queue number "
+ "for socket file %s.\n", path);
+ ret = -1;
+ goto unlock_exit;
+ }
+
+ *queue_num = RTE_MIN((uint32_t)VHOST_MAX_QUEUE_PAIRS, vdpa_queue_num);
+
+unlock_exit:
+ pthread_mutex_unlock(&vhost_user.mutex);
+ return ret;
}
/*
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 16b0f9a6f..b1afd693a 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -296,11 +296,17 @@ void
vhost_destroy_device(int vid)
{
struct virtio_net *dev = get_device(vid);
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
if (dev == NULL)
return;
if (dev->flags & VIRTIO_DEV_RUNNING) {
+ did = dev->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (vdpa_dev && vdpa_dev->ops->dev_close)
+ vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_RUNNING;
dev->notify_ops->destroy_device(vid);
}
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 27f40ea2b..d80d02c78 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -27,6 +27,8 @@
#define VIRTIO_DEV_READY 2
/* Used to indicate that the built-in vhost net device backend is enabled */
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
+/* Used to indicate that the device has its own data path and is configured */
+#define VIRTIO_DEV_VDPA_CONFIGURED 8
/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 5c5361066..88785c095 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -133,7 +133,14 @@ vhost_user_set_owner(void)
static int
vhost_user_reset_owner(struct virtio_net *dev)
{
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
+
if (dev->flags & VIRTIO_DEV_RUNNING) {
+ did = dev->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (vdpa_dev && vdpa_dev->ops->dev_close)
+ vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_RUNNING;
dev->notify_ops->destroy_device(dev->vid);
}
@@ -156,12 +163,26 @@ vhost_user_get_features(struct virtio_net *dev)
}
/*
+ * The number of queues that we support is requested.
+ */
+static uint32_t
+vhost_user_get_queue_num(struct virtio_net *dev)
+{
+ uint32_t queue_num = 0;
+
+ rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
+ return queue_num;
+}
+
+/*
* We receive the negotiated features supported by us and the virtio device.
*/
static int
vhost_user_set_features(struct virtio_net *dev, uint64_t features)
{
uint64_t vhost_features = 0;
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
rte_vhost_driver_get_features(dev->ifname, &vhost_features);
if (features & ~vhost_features) {
@@ -191,6 +212,11 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
dev->notify_ops->features_changed(dev->vid, features);
}
+ did = dev->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (vdpa_dev && vdpa_dev->ops->set_features)
+ vdpa_dev->ops->set_features(dev->vid);
+
dev->features = features;
if (dev->features &
((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
@@ -933,14 +959,21 @@ vhost_user_get_vring_base(struct virtio_net *dev,
VhostUserMsg *msg)
{
struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
/* We have to stop the queue (virtio) if it is running. */
if (dev->flags & VIRTIO_DEV_RUNNING) {
+ did = dev->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (vdpa_dev && vdpa_dev->ops->dev_close)
+ vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_RUNNING;
dev->notify_ops->destroy_device(dev->vid);
}
dev->flags &= ~VIRTIO_DEV_READY;
+ dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
/* Here we are safe to get the last avail index */
msg->payload.state.num = vq->last_avail_idx;
@@ -983,16 +1016,24 @@ vhost_user_set_vring_enable(struct virtio_net *dev,
VhostUserMsg *msg)
{
int enable = (int)msg->payload.state.num;
+ int index = (int)msg->payload.state.index;
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
RTE_LOG(INFO, VHOST_CONFIG,
"set queue enable: %d to qp idx: %d\n",
- enable, msg->payload.state.index);
+ enable, index);
+
+ did = dev->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (vdpa_dev && vdpa_dev->ops->set_vring_state)
+ vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
if (dev->notify_ops->vring_state_changed)
dev->notify_ops->vring_state_changed(dev->vid,
- msg->payload.state.index, enable);
+ index, enable);
- dev->virtqueue[msg->payload.state.index]->enabled = enable;
+ dev->virtqueue[index]->enabled = enable;
return 0;
}
@@ -1001,9 +1042,10 @@ static void
vhost_user_get_protocol_features(struct virtio_net *dev,
struct VhostUserMsg *msg)
{
- uint64_t features, protocol_features = VHOST_USER_PROTOCOL_FEATURES;
+ uint64_t features, protocol_features;
rte_vhost_driver_get_features(dev->ifname, &features);
+ rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
/*
* REPLY_ACK protocol feature is only mandatory for now
@@ -1099,6 +1141,8 @@ static int
vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
{
uint8_t *mac = (uint8_t *)&msg->payload.u64;
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
RTE_LOG(DEBUG, VHOST_CONFIG,
":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -1114,6 +1158,10 @@ vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
*/
rte_smp_wmb();
rte_atomic16_set(&dev->broadcast_rarp, 1);
+ did = dev->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (vdpa_dev && vdpa_dev->ops->migration_done)
+ vdpa_dev->ops->migration_done(dev->vid);
return 0;
}
@@ -1375,6 +1423,8 @@ vhost_user_msg_handler(int vid, int fd)
{
struct virtio_net *dev;
struct VhostUserMsg msg;
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
int ret;
int unlock_required = 0;
@@ -1527,7 +1577,7 @@ vhost_user_msg_handler(int vid, int fd)
break;
case VHOST_USER_GET_QUEUE_NUM:
- msg.payload.u64 = VHOST_MAX_QUEUE_PAIRS;
+ msg.payload.u64 = (uint64_t)vhost_user_get_queue_num(dev);
msg.size = sizeof(msg.payload.u64);
send_vhost_reply(fd, &msg);
break;
@@ -1580,6 +1630,16 @@ vhost_user_msg_handler(int vid, int fd)
}
}
+ did = dev->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (vdpa_dev && virtio_is_ready(dev) &&
+ !(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
+ msg.request.master == VHOST_USER_SET_VRING_ENABLE) {
+ if (vdpa_dev->ops->dev_conf)
+ vdpa_dev->ops->dev_conf(dev->vid);
+ dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
+ }
+
return 0;
}
--
2.13.6
next prev parent reply other threads:[~2018-04-02 11:47 UTC|newest]
Thread overview: 67+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-02-02 23:28 [dpdk-dev] [PATCH 0/7] vhost: support " Zhihong Wang
2018-02-02 23:28 ` [dpdk-dev] [PATCH 1/7] vhost: make capabilities configurable Zhihong Wang
2018-02-06 10:19 ` Maxime Coquelin
2018-02-08 3:03 ` Wang, Zhihong
2018-02-02 23:28 ` [dpdk-dev] [PATCH 2/7] vhost: export vhost feature definitions Zhihong Wang
2018-02-02 23:28 ` [dpdk-dev] [PATCH 3/7] vhost: support selective datapath Zhihong Wang
2018-02-02 23:28 ` [dpdk-dev] [PATCH 4/7] vhost: add apis for datapath configuration Zhihong Wang
2018-02-02 23:28 ` [dpdk-dev] [PATCH 5/7] vhost: adapt vhost lib for selective datapath Zhihong Wang
2018-02-02 23:28 ` [dpdk-dev] [PATCH 6/7] vhost: get callfd before device setup Zhihong Wang
2018-02-02 23:28 ` [dpdk-dev] [PATCH 7/7] vhost: export new apis Zhihong Wang
2018-03-05 9:20 ` [dpdk-dev] [PATCH v2 0/6] vhost: support selective datapath Zhihong Wang
2018-03-05 9:20 ` [dpdk-dev] [PATCH v2 1/6] vhost: export vhost feature definitions Zhihong Wang
2018-03-06 9:37 ` Tan, Jianfeng
2018-03-06 14:03 ` Maxime Coquelin
2018-03-15 10:58 ` Wang, Zhihong
2018-03-05 9:20 ` [dpdk-dev] [PATCH v2 2/6] vhost: support selective datapath Zhihong Wang
2018-03-05 9:20 ` [dpdk-dev] [PATCH v2 3/6] vhost: add apis for datapath configuration Zhihong Wang
2018-03-05 9:20 ` [dpdk-dev] [PATCH v2 5/6] vhost: add apis for live migration Zhihong Wang
2018-03-05 9:20 ` [dpdk-dev] [PATCH v2 4/6] vhost: adapt vhost lib for selective datapath Zhihong Wang
2018-03-05 9:20 ` [dpdk-dev] [PATCH v2 6/6] vhost: export new apis Zhihong Wang
2018-03-06 9:51 ` Tan, Jianfeng
2018-03-15 10:55 ` Wang, Zhihong
2018-03-19 10:12 ` [dpdk-dev] [PATCH v3 0/5] vhost: support selective datapath Zhihong Wang
2018-03-19 10:12 ` [dpdk-dev] [PATCH v3 1/5] vhost: export vhost feature definitions Zhihong Wang
2018-03-19 10:12 ` [dpdk-dev] [PATCH v3 2/5] vhost: support selective datapath Zhihong Wang
2018-03-21 21:05 ` Maxime Coquelin
2018-03-22 7:55 ` Wang, Zhihong
2018-03-22 8:31 ` Maxime Coquelin
2018-03-19 10:12 ` [dpdk-dev] [PATCH v3 3/5] vhost: add apis for datapath configuration Zhihong Wang
2018-03-21 21:08 ` Maxime Coquelin
2018-03-22 8:22 ` Wang, Zhihong
2018-03-22 14:18 ` Maxime Coquelin
2018-03-23 10:35 ` Wang, Zhihong
2018-03-19 10:12 ` [dpdk-dev] [PATCH v3 4/5] vhost: adapt vhost lib for selective datapath Zhihong Wang
2018-03-19 10:13 ` [dpdk-dev] [PATCH v3 5/5] vhost: add apis for live migration Zhihong Wang
2018-03-29 12:15 ` [dpdk-dev] [PATCH v3 0/5] vhost: support selective datapath Wodkowski, PawelX
2018-03-30 9:35 ` Wang, Zhihong
2018-03-30 10:00 ` [dpdk-dev] [PATCH v4 " Zhihong Wang
2018-03-30 10:00 ` [dpdk-dev] [PATCH v4 1/5] vhost: export vhost feature definitions Zhihong Wang
2018-03-31 5:56 ` Maxime Coquelin
2018-03-30 10:01 ` [dpdk-dev] [PATCH v4 2/5] vhost: support selective datapath Zhihong Wang
2018-03-31 6:10 ` Maxime Coquelin
2018-04-02 1:58 ` Wang, Zhihong
2018-03-31 7:38 ` Maxime Coquelin
2018-04-02 2:03 ` Wang, Zhihong
2018-03-30 10:01 ` [dpdk-dev] [PATCH v4 3/5] vhost: add apis for datapath configuration Zhihong Wang
2018-03-31 7:04 ` Maxime Coquelin
2018-04-02 2:01 ` Wang, Zhihong
2018-03-30 10:01 ` [dpdk-dev] [PATCH v4 4/5] vhost: adapt vhost lib for selective datapath Zhihong Wang
2018-03-31 7:35 ` Maxime Coquelin
2018-04-02 11:52 ` Wang, Zhihong
2018-03-30 10:01 ` [dpdk-dev] [PATCH v4 5/5] vhost: add apis for live migration Zhihong Wang
2018-03-31 7:39 ` Maxime Coquelin
2018-04-02 11:46 ` [dpdk-dev] [PATCH v5 0/5] vhost: support selective datapath Zhihong Wang
2018-04-02 11:46 ` [dpdk-dev] [PATCH v5 1/5] vhost: export vhost feature definitions Zhihong Wang
2018-04-02 11:46 ` [dpdk-dev] [PATCH v5 2/5] vhost: support selective datapath Zhihong Wang
2018-04-03 8:02 ` Maxime Coquelin
2018-04-15 17:39 ` Thomas Monjalon
2018-04-16 7:26 ` Maxime Coquelin
2018-04-03 8:19 ` Maxime Coquelin
2018-04-03 14:35 ` Wang, Zhihong
2018-04-02 11:46 ` [dpdk-dev] [PATCH v5 3/5] vhost: add apis for datapath configuration Zhihong Wang
2018-04-03 8:07 ` Maxime Coquelin
2018-04-02 11:46 ` Zhihong Wang [this message]
2018-04-03 8:05 ` [dpdk-dev] [PATCH v5 4/5] vhost: adapt vhost lib for selective datapath Maxime Coquelin
2018-04-02 11:46 ` [dpdk-dev] [PATCH v5 5/5] vhost: add apis for live migration Zhihong Wang
2018-04-03 8:27 ` [dpdk-dev] [PATCH v5 0/5] vhost: support selective datapath Maxime Coquelin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20180402114656.17090-5-zhihong.wang@intel.com \
--to=zhihong.wang@intel.com \
--cc=cunming.liang@intel.com \
--cc=dan.daly@intel.com \
--cc=dev@dpdk.org \
--cc=jianfeng.tan@intel.com \
--cc=maxime.coquelin@redhat.com \
--cc=tiwei.bie@intel.com \
--cc=xiao.w.wang@intel.com \
--cc=yliu@fridaylinux.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).