DPDK patches and discussions
* [PATCH v2] mempool: fix rte_mempool_avail_count may segment fault when used in multiprocess
@ 2022-11-15 12:35 Fengnan Chang
  2022-11-22 15:24 ` Olivier Matz
  0 siblings, 1 reply; 4+ messages in thread
From: Fengnan Chang @ 2022-11-15 12:35 UTC (permalink / raw)
  To: david.marchand, olivier.matz, mb, dev; +Cc: Fengnan Chang

rte_mempool_create() puts the tailq entry into the rte_mempool_tailq
list before the pool is populated, while pool_data is only set during
populate. So in multi-process mode, if process A creates a mempool,
process B can get it through rte_mempool_lookup() before pool_data is
set; if B then calls rte_mempool_avail_count(), it causes a
segmentation fault.

Fix this by putting the tailq entry into rte_mempool_tailq after
populate.

Signed-off-by: Fengnan Chang <changfengnan@bytedance.com>
---
 lib/mempool/rte_mempool.c | 43 ++++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 19 deletions(-)

diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c
index 4c78071a34..b3a6572fc8 100644
--- a/lib/mempool/rte_mempool.c
+++ b/lib/mempool/rte_mempool.c
@@ -155,6 +155,27 @@ get_min_page_size(int socket_id)
 	return wa.min == SIZE_MAX ? (size_t) rte_mem_page_size() : wa.min;
 }
 
+static int
+add_mempool_to_list(struct rte_mempool *mp)
+{
+	struct rte_mempool_list *mempool_list;
+	struct rte_tailq_entry *te = NULL;
+
+	/* try to allocate tailq entry */
+	te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
+	if (te == NULL) {
+		RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
+		return -ENOMEM;
+	}
+
+	te->data = mp;
+	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
+	rte_mcfg_tailq_write_lock();
+	TAILQ_INSERT_TAIL(mempool_list, te, next);
+	rte_mcfg_tailq_write_unlock();
+
+	return 0;
+}
 
 static void
 mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque,
@@ -304,6 +325,9 @@ mempool_ops_alloc_once(struct rte_mempool *mp)
 		if (ret != 0)
 			return ret;
 		mp->flags |= RTE_MEMPOOL_F_POOL_CREATED;
+		ret = add_mempool_to_list(mp);
+		if (ret != 0)
+			return ret;
 	}
 	return 0;
 }
@@ -798,9 +822,7 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 	int socket_id, unsigned flags)
 {
 	char mz_name[RTE_MEMZONE_NAMESIZE];
-	struct rte_mempool_list *mempool_list;
 	struct rte_mempool *mp = NULL;
-	struct rte_tailq_entry *te = NULL;
 	const struct rte_memzone *mz = NULL;
 	size_t mempool_size;
 	unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
@@ -820,8 +842,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 			  RTE_CACHE_LINE_MASK) != 0);
 #endif
 
-	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
-
 	/* asked for zero items */
 	if (n == 0) {
 		rte_errno = EINVAL;
@@ -866,14 +886,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 	private_data_size = (private_data_size +
 			     RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);
 
-
-	/* try to allocate tailq entry */
-	te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
-	if (te == NULL) {
-		RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
-		goto exit_unlock;
-	}
-
 	mempool_size = RTE_MEMPOOL_HEADER_SIZE(mp, cache_size);
 	mempool_size += private_data_size;
 	mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
@@ -923,20 +935,13 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 					   cache_size);
 	}
 
-	te->data = mp;
-
-	rte_mcfg_tailq_write_lock();
-	TAILQ_INSERT_TAIL(mempool_list, te, next);
-	rte_mcfg_tailq_write_unlock();
 	rte_mcfg_mempool_write_unlock();
-
 	rte_mempool_trace_create_empty(name, n, elt_size, cache_size,
 		private_data_size, flags, mp);
 	return mp;
 
 exit_unlock:
 	rte_mcfg_mempool_write_unlock();
-	rte_free(te);
 	rte_mempool_free(mp);
 	return NULL;
 }
-- 
2.37.0 (Apple Git-136)



* Re: [PATCH v2] mempool: fix rte_mempool_avail_count may segment fault when used in multiprocess
  2022-11-15 12:35 [PATCH v2] mempool: fix rte_mempool_avail_count may segment fault when used in multiprocess Fengnan Chang
@ 2022-11-22 15:24 ` Olivier Matz
  2022-11-29  9:57   ` [External] " Fengnan Chang
  0 siblings, 1 reply; 4+ messages in thread
From: Olivier Matz @ 2022-11-22 15:24 UTC (permalink / raw)
  To: Fengnan Chang; +Cc: david.marchand, mb, dev

Hi,

On Tue, Nov 15, 2022 at 08:35:02PM +0800, Fengnan Chang wrote:
> rte_mempool_create() puts the tailq entry into the rte_mempool_tailq
> list before the pool is populated, while pool_data is only set during
> populate. So in multi-process mode, if process A creates a mempool,
> process B can get it through rte_mempool_lookup() before pool_data is
> set; if B then calls rte_mempool_avail_count(), it causes a
> segmentation fault.
> 
> Fix this by putting the tailq entry into rte_mempool_tailq after
> populate.
> 
> Signed-off-by: Fengnan Chang <changfengnan@bytedance.com>
> ---
>  lib/mempool/rte_mempool.c | 43 ++++++++++++++++++++++-----------------
>  1 file changed, 24 insertions(+), 19 deletions(-)
> 
> diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c
> index 4c78071a34..b3a6572fc8 100644
> --- a/lib/mempool/rte_mempool.c
> +++ b/lib/mempool/rte_mempool.c
> @@ -155,6 +155,27 @@ get_min_page_size(int socket_id)
>  	return wa.min == SIZE_MAX ? (size_t) rte_mem_page_size() : wa.min;
>  }
>  
> +static int
> +add_mempool_to_list(struct rte_mempool *mp)
> +{
> +	struct rte_mempool_list *mempool_list;
> +	struct rte_tailq_entry *te = NULL;
> +
> +	/* try to allocate tailq entry */
> +	te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
> +	if (te == NULL) {
> +		RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
> +		return -ENOMEM;
> +	}
> +
> +	te->data = mp;
> +	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
> +	rte_mcfg_tailq_write_lock();
> +	TAILQ_INSERT_TAIL(mempool_list, te, next);
> +	rte_mcfg_tailq_write_unlock();
> +
> +	return 0;
> +}
>  
>  static void
>  mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque,
> @@ -304,6 +325,9 @@ mempool_ops_alloc_once(struct rte_mempool *mp)
>  		if (ret != 0)
>  			return ret;
>  		mp->flags |= RTE_MEMPOOL_F_POOL_CREATED;
> +		ret = add_mempool_to_list(mp);
> +		if (ret != 0)
> +			return ret;

One issue here is that if the rte_zmalloc("MEMPOOL_TAILQ_ENTRY") fails,
the function will fail, but rte_mempool_ops_alloc() may already have
succeeded.

I agree it's theoretical, because an allocation failure at that point
would cause more issues anyway. But, to be rigorous, I think we should
do something like this instead (not tested, just for the idea):

	static int
	mempool_ops_alloc_once(struct rte_mempool *mp)
	{
		struct rte_mempool_list *mempool_list;
		struct rte_tailq_entry *te = NULL;
		int ret;

		/* only create the driver ops and add to the tailq if not already done */
		if ((mp->flags & RTE_MEMPOOL_F_POOL_CREATED))
			return 0;

		te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
		if (te == NULL) {
			RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
			ret = -rte_errno;
			goto fail;
		}
		te->data = mp;
		mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);

		ret = rte_mempool_ops_alloc(mp);
		if (ret != 0)
			goto fail;

		mp->flags |= RTE_MEMPOOL_F_POOL_CREATED;
		rte_mcfg_tailq_write_lock();
		TAILQ_INSERT_TAIL(mempool_list, te, next);
		rte_mcfg_tailq_write_unlock();

		return 0;

	fail:
		rte_free(te);
		return ret;
	}


Thinking a bit more about the problem itself: the segfault that you
describe could also happen in a primary, without multi-process:
- create an empty mempool
- call rte_mempool_avail_count() before it is populated
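
For instance, something like this in a single primary should already
crash (not tested, just to illustrate; the pool name and sizes are
arbitrary):

	struct rte_mempool *mp;

	/* no driver ops set and not populated: pool_data is not set yet */
	mp = rte_mempool_create_empty("test_pool", 1024, 64, 0, 0,
				      SOCKET_ID_ANY, 0);

	/* reads the pool through its ops while pool_data is invalid */
	rte_mempool_avail_count(mp);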

This simply means that an empty mempool is not ready for use until
rte_mempool_set_ops_byname() or rte_mempool_populate*() is called. This
is something that we should document above the declaration of
rte_mempool_create_empty(). We could also say there that the mempool
will become visible to the secondary processes as soon as the driver
ops are set.
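
For instance, something like this above the prototype (just a first
wording, to be adjusted):

	/**
	 * ...
	 * @note The returned mempool is not ready for use: the driver ops
	 * must be set (e.g. with rte_mempool_set_ops_byname()) and the pool
	 * must be populated with rte_mempool_populate*() before objects can
	 * be requested or counted. Once the driver ops are set, the mempool
	 * becomes visible to secondary processes through rte_mempool_lookup().
	 */
	struct rte_mempool *
	rte_mempool_create_empty(const char *name, unsigned n,
		unsigned elt_size, unsigned cache_size,
		unsigned private_data_size, int socket_id, unsigned flags);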

However, I still believe that a better synchronization point is
required in the application. After all, the presence in the TAILQ does
not give any hint about the status of the object. Can we imagine a case
where a mempool is created empty in a primary, and populated in a
secondary? If such a use case exists, we may not want to take this
patch.

>  	}
>  	return 0;
>  }
> @@ -798,9 +822,7 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
>  	int socket_id, unsigned flags)
>  {
>  	char mz_name[RTE_MEMZONE_NAMESIZE];
> -	struct rte_mempool_list *mempool_list;
>  	struct rte_mempool *mp = NULL;
> -	struct rte_tailq_entry *te = NULL;
>  	const struct rte_memzone *mz = NULL;
>  	size_t mempool_size;
>  	unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
> @@ -820,8 +842,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
>  			  RTE_CACHE_LINE_MASK) != 0);
>  #endif
>  
> -	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
> -
>  	/* asked for zero items */
>  	if (n == 0) {
>  		rte_errno = EINVAL;
> @@ -866,14 +886,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
>  	private_data_size = (private_data_size +
>  			     RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);
>  
> -
> -	/* try to allocate tailq entry */
> -	te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
> -	if (te == NULL) {
> -		RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
> -		goto exit_unlock;
> -	}
> -
>  	mempool_size = RTE_MEMPOOL_HEADER_SIZE(mp, cache_size);
>  	mempool_size += private_data_size;
>  	mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
> @@ -923,20 +935,13 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
>  					   cache_size);
>  	}
>  
> -	te->data = mp;
> -
> -	rte_mcfg_tailq_write_lock();
> -	TAILQ_INSERT_TAIL(mempool_list, te, next);
> -	rte_mcfg_tailq_write_unlock();
>  	rte_mcfg_mempool_write_unlock();
> -
>  	rte_mempool_trace_create_empty(name, n, elt_size, cache_size,
>  		private_data_size, flags, mp);
>  	return mp;
>  
>  exit_unlock:
>  	rte_mcfg_mempool_write_unlock();
> -	rte_free(te);
>  	rte_mempool_free(mp);
>  	return NULL;
>  }
> -- 
> 2.37.0 (Apple Git-136)
> 


* Re: [External] Re: [PATCH v2] mempool: fix rte_mempool_avail_count may segment fault when used in multiprocess
  2022-11-22 15:24 ` Olivier Matz
@ 2022-11-29  9:57   ` Fengnan Chang
  2023-07-17 16:43     ` Stephen Hemminger
  0 siblings, 1 reply; 4+ messages in thread
From: Fengnan Chang @ 2022-11-29  9:57 UTC (permalink / raw)
  To: Olivier Matz; +Cc: david.marchand, mb, dev

Olivier Matz <olivier.matz@6wind.com> wrote on Tue, Nov 22, 2022 at 23:25:
>
> Hi,
>
> On Tue, Nov 15, 2022 at 08:35:02PM +0800, Fengnan Chang wrote:
> > rte_mempool_create() puts the tailq entry into the rte_mempool_tailq
> > list before the pool is populated, while pool_data is only set during
> > populate. So in multi-process mode, if process A creates a mempool,
> > process B can get it through rte_mempool_lookup() before pool_data is
> > set; if B then calls rte_mempool_avail_count(), it causes a
> > segmentation fault.
> >
> > Fix this by putting the tailq entry into rte_mempool_tailq after
> > populate.
> >
> > Signed-off-by: Fengnan Chang <changfengnan@bytedance.com>
> > ---
> >  lib/mempool/rte_mempool.c | 43 ++++++++++++++++++++++-----------------
> >  1 file changed, 24 insertions(+), 19 deletions(-)
> >
> > diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c
> > index 4c78071a34..b3a6572fc8 100644
> > --- a/lib/mempool/rte_mempool.c
> > +++ b/lib/mempool/rte_mempool.c
> > @@ -155,6 +155,27 @@ get_min_page_size(int socket_id)
> >       return wa.min == SIZE_MAX ? (size_t) rte_mem_page_size() : wa.min;
> >  }
> >
> > +static int
> > +add_mempool_to_list(struct rte_mempool *mp)
> > +{
> > +     struct rte_mempool_list *mempool_list;
> > +     struct rte_tailq_entry *te = NULL;
> > +
> > +     /* try to allocate tailq entry */
> > +     te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
> > +     if (te == NULL) {
> > +             RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
> > +             return -ENOMEM;
> > +     }
> > +
> > +     te->data = mp;
> > +     mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
> > +     rte_mcfg_tailq_write_lock();
> > +     TAILQ_INSERT_TAIL(mempool_list, te, next);
> > +     rte_mcfg_tailq_write_unlock();
> > +
> > +     return 0;
> > +}
> >
> >  static void
> >  mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque,
> > @@ -304,6 +325,9 @@ mempool_ops_alloc_once(struct rte_mempool *mp)
> >               if (ret != 0)
> >                       return ret;
> >               mp->flags |= RTE_MEMPOOL_F_POOL_CREATED;
> > +             ret = add_mempool_to_list(mp);
> > +             if (ret != 0)
> > +                     return ret;
>
> One issue here is that if the rte_zmalloc("MEMPOOL_TAILQ_ENTRY") fails,
> the function will fail, but rte_mempool_ops_alloc() may already have
> succeeded.
>
> I agree it's theoretical, because an allocation failure at that point
> would cause more issues anyway. But, to be rigorous, I think we should
> do something like this instead (not tested, just for the idea):
>
>         static int
>         mempool_ops_alloc_once(struct rte_mempool *mp)
>         {
>                 struct rte_mempool_list *mempool_list;
>                 struct rte_tailq_entry *te = NULL;
>                 int ret;
>
>                 /* only create the driver ops and add to the tailq if not already done */
>                 if ((mp->flags & RTE_MEMPOOL_F_POOL_CREATED))
>                         return 0;
>
>                 te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
>                 if (te == NULL) {
>                         RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
>                         ret = -rte_errno;
>                         goto fail;
>                 }
>                 te->data = mp;
>                 mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
>
>                 ret = rte_mempool_ops_alloc(mp);
>                 if (ret != 0)
>                         goto fail;
>
>                 mp->flags |= RTE_MEMPOOL_F_POOL_CREATED;
>                 rte_mcfg_tailq_write_lock();
>                 TAILQ_INSERT_TAIL(mempool_list, te, next);
>                 rte_mcfg_tailq_write_unlock();
>
>                 return 0;
>
>         fail:
>                 rte_free(te);
>                 return ret;
>         }
>
>
> Thinking a bit more about the problem itself: the segfault that you
> describe could also happen in a primary, without multi-process:
> - create an empty mempool
> - call rte_mempool_avail_count() before it is populated
>
> This simply means that an empty mempool is not ready for use until
> rte_mempool_set_ops_byname() or rte_mempool_populate*() is called. This
> is something that we should document above the declaration of
> rte_mempool_create_empty(). We could also say there that the mempool
> will become visible to the secondary processes as soon as the driver
> ops are set.
>
> However, I still believe that a better synchronization point is
> required in the application. After all, the presence in the TAILQ does
> not give any hint about the status of the object. Can we imagine a case
> where a mempool is created empty in a primary, and populated in a
> secondary? If such a use case exists, we may not want to take this
> patch.

Maybe there is a case like you said. Do you think adding a check of the
mempool flags in rte_mempool_avail_count() is acceptable? If
RTE_MEMPOOL_F_POOL_CREATED is not set, just return 0.

>
> >       }
> >       return 0;
> >  }
> > @@ -798,9 +822,7 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
> >       int socket_id, unsigned flags)
> >  {
> >       char mz_name[RTE_MEMZONE_NAMESIZE];
> > -     struct rte_mempool_list *mempool_list;
> >       struct rte_mempool *mp = NULL;
> > -     struct rte_tailq_entry *te = NULL;
> >       const struct rte_memzone *mz = NULL;
> >       size_t mempool_size;
> >       unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
> > @@ -820,8 +842,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
> >                         RTE_CACHE_LINE_MASK) != 0);
> >  #endif
> >
> > -     mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
> > -
> >       /* asked for zero items */
> >       if (n == 0) {
> >               rte_errno = EINVAL;
> > @@ -866,14 +886,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
> >       private_data_size = (private_data_size +
> >                            RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);
> >
> > -
> > -     /* try to allocate tailq entry */
> > -     te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
> > -     if (te == NULL) {
> > -             RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
> > -             goto exit_unlock;
> > -     }
> > -
> >       mempool_size = RTE_MEMPOOL_HEADER_SIZE(mp, cache_size);
> >       mempool_size += private_data_size;
> >       mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
> > @@ -923,20 +935,13 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
> >                                          cache_size);
> >       }
> >
> > -     te->data = mp;
> > -
> > -     rte_mcfg_tailq_write_lock();
> > -     TAILQ_INSERT_TAIL(mempool_list, te, next);
> > -     rte_mcfg_tailq_write_unlock();
> >       rte_mcfg_mempool_write_unlock();
> > -
> >       rte_mempool_trace_create_empty(name, n, elt_size, cache_size,
> >               private_data_size, flags, mp);
> >       return mp;
> >
> >  exit_unlock:
> >       rte_mcfg_mempool_write_unlock();
> > -     rte_free(te);
> >       rte_mempool_free(mp);
> >       return NULL;
> >  }
> > --
> > 2.37.0 (Apple Git-136)
> >


* Re: [External] Re: [PATCH v2] mempool: fix rte_mempool_avail_count may segment fault when used in multiprocess
  2022-11-29  9:57   ` [External] " Fengnan Chang
@ 2023-07-17 16:43     ` Stephen Hemminger
  0 siblings, 0 replies; 4+ messages in thread
From: Stephen Hemminger @ 2023-07-17 16:43 UTC (permalink / raw)
  To: Fengnan Chang; +Cc: Olivier Matz, david.marchand, mb, dev

On Tue, 29 Nov 2022 17:57:05 +0800
Fengnan Chang <changfengnan@bytedance.com> wrote:

> Olivier Matz <olivier.matz@6wind.com> wrote on Tue, Nov 22, 2022 at 23:25:
> >
> > Hi,
> >
> > On Tue, Nov 15, 2022 at 08:35:02PM +0800, Fengnan Chang wrote:  
> > > rte_mempool_create() puts the tailq entry into the rte_mempool_tailq
> > > list before the pool is populated, while pool_data is only set during
> > > populate. So in multi-process mode, if process A creates a mempool,
> > > process B can get it through rte_mempool_lookup() before pool_data is
> > > set; if B then calls rte_mempool_avail_count(), it causes a
> > > segmentation fault.
> > >
> > > Fix this by putting the tailq entry into rte_mempool_tailq after
> > > populate.
> > >
> > > Signed-off-by: Fengnan Chang <changfengnan@bytedance.com>

Why not just handle this in rte_mempool_avail_count?  It would be much simpler there.


diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c
index 4d337fca8dcd..14855e21801f 100644
--- a/lib/mempool/rte_mempool.c
+++ b/lib/mempool/rte_mempool.c
@@ -1006,6 +1006,10 @@ rte_mempool_avail_count(const struct rte_mempool *mp)
        unsigned count;
        unsigned lcore_id;
 
+       /* Handle race where pool created but ops not allocated yet */
+       if (!(mp->flags & RTE_MEMPOOL_F_POOL_CREATED))
+               return 0;
+
        count = rte_mempool_ops_get_count(mp);
 
        if (mp->cache_size == 0)
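
With a check like this, a secondary that looks up a mempool while the
primary is still setting it up would just read 0 available objects
instead of crashing, e.g. (sketch, the pool name is made up):

	/* in the secondary process */
	struct rte_mempool *mp = rte_mempool_lookup("test_pool");

	if (mp != NULL)
		printf("%u objects available\n", rte_mempool_avail_count(mp));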

