From: <eagostini@nvidia.com>
To: <dev@dpdk.org>
Cc: Thomas Monjalon <thomas@monjalon.net>
Subject: [dpdk-dev] [PATCH v5 4/9] gpudev: support multi-process
Date: Mon, 8 Nov 2021 18:58:00 +0000
Message-ID: <20211108185805.3887-5-eagostini@nvidia.com>
In-Reply-To: <20211108185805.3887-1-eagostini@nvidia.com>
From: Thomas Monjalon <thomas@monjalon.net>
The device data shared between processes is moved into a struct
allocated in shared memory (a new memzone common to all GPUs).
The main struct rte_gpu references the shared memory
via the pointer mpshared.
The API function rte_gpu_attach() is added to attach a device
from a secondary process.
The function rte_gpu_allocate() can be used only by the primary process.
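For illustration only (not part of this patch), a GPU driver probe
callback could combine the two entry points roughly as sketched below;
the driver name, the probe function and the include path are assumptions,
not taken from this series:

	#include <rte_eal.h>
	#include <rte_errno.h>
	#include <gpudev_driver.h>   /* internal driver header, path assumed */

	/* Hypothetical probe callback of a GPU driver ("my_gpu"). */
	static int
	my_gpu_probe(struct rte_device *rte_dev)
	{
		struct rte_gpu *dev;

		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
			/* Primary process: create the device and its shared data. */
			dev = rte_gpu_allocate(rte_dev->name);
		else
			/* Secondary process: find the shared data by device name. */
			dev = rte_gpu_attach(rte_dev->name);
		if (dev == NULL)
			return -rte_errno;

		dev->device = rte_dev;
		/* dev->ops and dev->process_private would be filled here. */

		/* Last step: mark the device initialized and notify callbacks. */
		rte_gpu_complete_new(dev);
		return 0;
	}

The memzone reserve/lookup happens inside these calls, so a driver
would not need to deal with rte_memzone directly.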
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
---
lib/gpudev/gpudev.c | 127 +++++++++++++++++++++++++++++++------
lib/gpudev/gpudev_driver.h | 25 ++++++--
lib/gpudev/version.map | 1 +
3 files changed, 127 insertions(+), 26 deletions(-)
diff --git a/lib/gpudev/gpudev.c b/lib/gpudev/gpudev.c
index aaf41e6071..17e371102a 100644
--- a/lib/gpudev/gpudev.c
+++ b/lib/gpudev/gpudev.c
@@ -5,6 +5,7 @@
#include <rte_eal.h>
#include <rte_tailq.h>
#include <rte_string_fns.h>
+#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_log.h>
@@ -28,6 +29,12 @@ static int16_t gpu_max;
/* Number of currently valid devices */
static int16_t gpu_count;
+/* Shared memory between processes. */
+static const char *GPU_MEMZONE = "rte_gpu_shared";
+static struct {
+ __extension__ struct rte_gpu_mpshared gpus[0];
+} *gpu_shared_mem;
+
/* Event callback object */
struct rte_gpu_callback {
TAILQ_ENTRY(rte_gpu_callback) next;
@@ -75,7 +82,7 @@ bool
rte_gpu_is_valid(int16_t dev_id)
{
if (dev_id >= 0 && dev_id < gpu_max &&
- gpus[dev_id].state == RTE_GPU_STATE_INITIALIZED)
+ gpus[dev_id].process_state == RTE_GPU_STATE_INITIALIZED)
return true;
return false;
}
@@ -85,7 +92,7 @@ gpu_match_parent(int16_t dev_id, int16_t parent)
{
if (parent == RTE_GPU_ID_ANY)
return true;
- return gpus[dev_id].info.parent == parent;
+ return gpus[dev_id].mpshared->info.parent == parent;
}
int16_t
@@ -94,7 +101,7 @@ rte_gpu_find_next(int16_t dev_id, int16_t parent)
if (dev_id < 0)
dev_id = 0;
while (dev_id < gpu_max &&
- (gpus[dev_id].state == RTE_GPU_STATE_UNUSED ||
+ (gpus[dev_id].process_state == RTE_GPU_STATE_UNUSED ||
!gpu_match_parent(dev_id, parent)))
dev_id++;
@@ -109,7 +116,7 @@ gpu_find_free_id(void)
int16_t dev_id;
for (dev_id = 0; dev_id < gpu_max; dev_id++) {
- if (gpus[dev_id].state == RTE_GPU_STATE_UNUSED)
+ if (gpus[dev_id].process_state == RTE_GPU_STATE_UNUSED)
return dev_id;
}
return RTE_GPU_ID_NONE;
@@ -136,12 +143,35 @@ rte_gpu_get_by_name(const char *name)
RTE_GPU_FOREACH(dev_id) {
dev = &gpus[dev_id];
- if (strncmp(name, dev->name, RTE_DEV_NAME_MAX_LEN) == 0)
+ if (strncmp(name, dev->mpshared->name, RTE_DEV_NAME_MAX_LEN) == 0)
return dev;
}
return NULL;
}
+static int
+gpu_shared_mem_init(void)
+{
+ const struct rte_memzone *memzone;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ memzone = rte_memzone_reserve(GPU_MEMZONE,
+ sizeof(*gpu_shared_mem) +
+ sizeof(*gpu_shared_mem->gpus) * gpu_max,
+ SOCKET_ID_ANY, 0);
+ } else {
+ memzone = rte_memzone_lookup(GPU_MEMZONE);
+ }
+ if (memzone == NULL) {
+ GPU_LOG(ERR, "cannot initialize shared memory");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+
+ gpu_shared_mem = memzone->addr;
+ return 0;
+}
+
struct rte_gpu *
rte_gpu_allocate(const char *name)
{
@@ -163,6 +193,10 @@ rte_gpu_allocate(const char *name)
if (gpus == NULL && rte_gpu_init(RTE_GPU_DEFAULT_MAX) < 0)
return NULL;
+ /* initialize shared memory before adding first device */
+ if (gpu_shared_mem == NULL && gpu_shared_mem_init() < 0)
+ return NULL;
+
if (rte_gpu_get_by_name(name) != NULL) {
GPU_LOG(ERR, "device with name %s already exists", name);
rte_errno = EEXIST;
@@ -178,16 +212,20 @@ rte_gpu_allocate(const char *name)
dev = &gpus[dev_id];
memset(dev, 0, sizeof(*dev));
- if (rte_strscpy(dev->name, name, RTE_DEV_NAME_MAX_LEN) < 0) {
+ dev->mpshared = &gpu_shared_mem->gpus[dev_id];
+ memset(dev->mpshared, 0, sizeof(*dev->mpshared));
+
+ if (rte_strscpy(dev->mpshared->name, name, RTE_DEV_NAME_MAX_LEN) < 0) {
GPU_LOG(ERR, "device name too long: %s", name);
rte_errno = ENAMETOOLONG;
return NULL;
}
- dev->info.name = dev->name;
- dev->info.dev_id = dev_id;
- dev->info.numa_node = -1;
- dev->info.parent = RTE_GPU_ID_NONE;
+ dev->mpshared->info.name = dev->mpshared->name;
+ dev->mpshared->info.dev_id = dev_id;
+ dev->mpshared->info.numa_node = -1;
+ dev->mpshared->info.parent = RTE_GPU_ID_NONE;
TAILQ_INIT(&dev->callbacks);
+ __atomic_fetch_add(&dev->mpshared->process_refcnt, 1, __ATOMIC_RELAXED);
gpu_count++;
GPU_LOG(DEBUG, "new device %s (id %d) of total %d",
@@ -195,6 +233,55 @@ rte_gpu_allocate(const char *name)
return dev;
}
+struct rte_gpu *
+rte_gpu_attach(const char *name)
+{
+ int16_t dev_id;
+ struct rte_gpu *dev;
+ struct rte_gpu_mpshared *shared_dev;
+
+ if (rte_eal_process_type() != RTE_PROC_SECONDARY) {
+ GPU_LOG(ERR, "only secondary process can attach device");
+ rte_errno = EPERM;
+ return NULL;
+ }
+ if (name == NULL) {
+ GPU_LOG(ERR, "attach device without a name");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* implicit initialization of library before adding first device */
+ if (gpus == NULL && rte_gpu_init(RTE_GPU_DEFAULT_MAX) < 0)
+ return NULL;
+
+ /* initialize shared memory before adding first device */
+ if (gpu_shared_mem == NULL && gpu_shared_mem_init() < 0)
+ return NULL;
+
+ for (dev_id = 0; dev_id < gpu_max; dev_id++) {
+ shared_dev = &gpu_shared_mem->gpus[dev_id];
+ if (strncmp(name, shared_dev->name, RTE_DEV_NAME_MAX_LEN) == 0)
+ break;
+ }
+ if (dev_id >= gpu_max) {
+ GPU_LOG(ERR, "device with name %s not found", name);
+ rte_errno = ENOENT;
+ return NULL;
+ }
+ dev = &gpus[dev_id];
+ memset(dev, 0, sizeof(*dev));
+
+ TAILQ_INIT(&dev->callbacks);
+ dev->mpshared = shared_dev;
+ __atomic_fetch_add(&dev->mpshared->process_refcnt, 1, __ATOMIC_RELAXED);
+
+ gpu_count++;
+ GPU_LOG(DEBUG, "attached device %s (id %d) of total %d",
+ name, dev_id, gpu_count);
+ return dev;
+}
+
int16_t
rte_gpu_add_child(const char *name, int16_t parent, uint64_t child_context)
{
@@ -210,11 +297,11 @@ rte_gpu_add_child(const char *name, int16_t parent, uint64_t child_context)
if (dev == NULL)
return -rte_errno;
- dev->info.parent = parent;
- dev->info.context = child_context;
+ dev->mpshared->info.parent = parent;
+ dev->mpshared->info.context = child_context;
rte_gpu_complete_new(dev);
- return dev->info.dev_id;
+ return dev->mpshared->info.dev_id;
}
void
@@ -223,8 +310,7 @@ rte_gpu_complete_new(struct rte_gpu *dev)
if (dev == NULL)
return;
- dev->state = RTE_GPU_STATE_INITIALIZED;
- dev->state = RTE_GPU_STATE_INITIALIZED;
+ dev->process_state = RTE_GPU_STATE_INITIALIZED;
rte_gpu_notify(dev, RTE_GPU_EVENT_NEW);
}
@@ -237,7 +323,7 @@ rte_gpu_release(struct rte_gpu *dev)
rte_errno = ENODEV;
return -rte_errno;
}
- dev_id = dev->info.dev_id;
+ dev_id = dev->mpshared->info.dev_id;
RTE_GPU_FOREACH_CHILD(child, dev_id) {
GPU_LOG(ERR, "cannot release device %d with child %d",
dev_id, child);
@@ -246,11 +332,12 @@ rte_gpu_release(struct rte_gpu *dev)
}
GPU_LOG(DEBUG, "free device %s (id %d)",
- dev->info.name, dev->info.dev_id);
+ dev->mpshared->info.name, dev->mpshared->info.dev_id);
rte_gpu_notify(dev, RTE_GPU_EVENT_DEL);
gpu_free_callbacks(dev);
- dev->state = RTE_GPU_STATE_UNUSED;
+ dev->process_state = RTE_GPU_STATE_UNUSED;
+ __atomic_fetch_sub(&dev->mpshared->process_refcnt, 1, __ATOMIC_RELAXED);
gpu_count--;
return 0;
@@ -403,7 +490,7 @@ rte_gpu_notify(struct rte_gpu *dev, enum rte_gpu_event event)
int16_t dev_id;
struct rte_gpu_callback *callback;
- dev_id = dev->info.dev_id;
+ dev_id = dev->mpshared->info.dev_id;
rte_rwlock_read_lock(&gpu_callback_lock);
TAILQ_FOREACH(callback, &dev->callbacks, next) {
if (callback->event != event || callback->function == NULL)
@@ -431,7 +518,7 @@ rte_gpu_info_get(int16_t dev_id, struct rte_gpu_info *info)
}
if (dev->ops.dev_info_get == NULL) {
- *info = dev->info;
+ *info = dev->mpshared->info;
return 0;
}
return GPU_DRV_RET(dev->ops.dev_info_get(dev, info));
diff --git a/lib/gpudev/gpudev_driver.h b/lib/gpudev/gpudev_driver.h
index 4d0077161c..9459c7e30f 100644
--- a/lib/gpudev/gpudev_driver.h
+++ b/lib/gpudev/gpudev_driver.h
@@ -35,19 +35,28 @@ struct rte_gpu_ops {
rte_gpu_close_t *dev_close;
};
-struct rte_gpu {
- /* Backing device. */
- struct rte_device *device;
+struct rte_gpu_mpshared {
/* Unique identifier name. */
char name[RTE_DEV_NAME_MAX_LEN]; /* Updated by this library. */
+ /* Driver-specific private data shared in multi-process. */
+ void *dev_private;
/* Device info structure. */
struct rte_gpu_info info;
+ /* Counter of processes using the device. */
+ uint16_t process_refcnt; /* Updated by this library. */
+};
+
+struct rte_gpu {
+ /* Backing device. */
+ struct rte_device *device;
+ /* Data shared between processes. */
+ struct rte_gpu_mpshared *mpshared;
/* Driver functions. */
struct rte_gpu_ops ops;
/* Event callback list. */
TAILQ_HEAD(rte_gpu_callback_list, rte_gpu_callback) callbacks;
/* Current state (used or not) in the running process. */
- enum rte_gpu_state state; /* Updated by this library. */
+ enum rte_gpu_state process_state; /* Updated by this library. */
/* Driver-specific private data for the running process. */
void *process_private;
} __rte_cache_aligned;
@@ -55,15 +64,19 @@ struct rte_gpu {
__rte_internal
struct rte_gpu *rte_gpu_get_by_name(const char *name);
-/* First step of initialization */
+/* First step of initialization in primary process. */
__rte_internal
struct rte_gpu *rte_gpu_allocate(const char *name);
+/* First step of initialization in secondary process. */
+__rte_internal
+struct rte_gpu *rte_gpu_attach(const char *name);
+
/* Last step of initialization. */
__rte_internal
void rte_gpu_complete_new(struct rte_gpu *dev);
-/* Last step of removal. */
+/* Last step of removal (primary or secondary process). */
__rte_internal
int rte_gpu_release(struct rte_gpu *dev);
diff --git a/lib/gpudev/version.map b/lib/gpudev/version.map
index 4a934ed933..58dc632393 100644
--- a/lib/gpudev/version.map
+++ b/lib/gpudev/version.map
@@ -17,6 +17,7 @@ INTERNAL {
global:
rte_gpu_allocate;
+ rte_gpu_attach;
rte_gpu_complete_new;
rte_gpu_get_by_name;
rte_gpu_notify;
--
2.17.1