DPDK patches and discussions
From: Yuanhan Liu <yuanhan.liu@linux.intel.com>
To: dev@dpdk.org
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>,
	Yuanhan Liu <yuanhan.liu@linux.intel.com>
Subject: [dpdk-dev] [PATCH v2 4/7] vhost: add dequeue zero copy
Date: Fri, 23 Sep 2016 12:13:24 +0800
Message-ID: <1474604007-5221-5-git-send-email-yuanhan.liu@linux.intel.com>
In-Reply-To: <1474604007-5221-1-git-send-email-yuanhan.liu@linux.intel.com>

The basic idea of dequeue zero copy is that, instead of copying data
from the desc buf, we let the mbuf reference the desc buf addr directly.

Doing so, however, has one major issue: we can't update the used ring
at the end of rte_vhost_dequeue_burst. Because we don't do the copy
there, an update of the used ring would let the driver reclaim the
desc buf, and DPDK might then reference a stale memory region.

To update the used ring properly, this patch uses several tricks:

- When an mbuf references a desc buf, the refcnt is increased by 1.

  This pins the mbuf, so that an mbuf free from the application
  won't actually free it; instead, the refcnt is just decreased by 1.

- We chain all those mbufs together (in a tailq).

  We then check the list at every rte_vhost_dequeue_burst entrance,
  to see whether an mbuf has been freed (its refcnt equals 1). If
  so, we are the last user of this mbuf and it is safe to update
  the used ring (see the sketch after this list).

- "struct zcopy_mbuf" is introduced, to associate an mbuf with the
  right desc idx.
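
As a concrete illustration, here is a minimal sketch of the refcnt
trick. It is not part of the patch: m, dev, vq and used_idx are
assumed to be in scope, while mbuf_is_consumed() and
update_used_ring() are the helpers added by the diff below.

    rte_mbuf_refcnt_update(m, 1);   /* refcnt: 1 -> 2, mbuf is pinned */

    /* ... the application consumes the packet and frees it ... */
    rte_pktmbuf_free(m);            /* refcnt: 2 -> 1, not freed yet */

    /* later, at the rte_vhost_dequeue_burst entrance */
    if (mbuf_is_consumed(m)) {      /* refcnt of every segment is 1 */
            update_used_ring(dev, vq, used_idx, zmbuf->desc_idx);
            rte_pktmbuf_free(m);    /* refcnt: 1 -> 0, back to the pool */
    }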

Dequeue zero copy is introduced for performance reasons: some rough
tests show about a 50% performance boost for 1500B packets. For
small packets (e.g. 64B), it actually slows things down a bit (by up
to 15%). That is expected, because this patch introduces some extra
work, which outweighs the benefit of saving the copy of a few bytes.

Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
---

v2: - use unlikely/likely for the dequeue_zero_copy check, as it's not
      enabled by default and has some limitations in the VM-to-NIC case.

    - handle the case where a desc buf spans 2 host physical pages
      (see the sketch below)

    - reset nr_zmbuf to 0 at set_vring_num

    - set zmbuf_size to vq->size, not double it.
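
A simplified sketch of that cross-page check, extracted from the
copy_desc_to_mbuf hunk below (gpa_to_hpa comes from patch 2/7 and
returns 0 when the guest buffer does not map to cpy_len contiguous
bytes of host physical memory):

    uint64_t hpa = gpa_to_hpa(dev, desc->addr + desc_offset, cpy_len);

    if (hpa) {
            /* contiguous on the host: reference the guest buf directly */
            cur->buf_addr     = (void *)(uintptr_t)desc_addr;
            cur->buf_physaddr = hpa;
    } else {
            /* spans two non-contiguous host pages: fall back to copying */
            rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, mbuf_offset),
                       (void *)(uintptr_t)(desc_addr + desc_offset),
                       cpy_len);
    }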
---
 lib/librte_vhost/vhost.c      |   2 +
 lib/librte_vhost/vhost.h      |  22 +++++-
 lib/librte_vhost/vhost_user.c |  42 +++++++++-
 lib/librte_vhost/virtio_net.c | 173 +++++++++++++++++++++++++++++++++++++-----
 4 files changed, 219 insertions(+), 20 deletions(-)

diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 46095c3..ab25649 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -141,6 +141,8 @@ init_vring_queue(struct vhost_virtqueue *vq, int qp_idx)
 	/* always set the default vq pair to enabled */
 	if (qp_idx == 0)
 		vq->enabled = 1;
+
+	TAILQ_INIT(&vq->zmbuf_list);
 }
 
 static void
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 8565fa1..be8a398 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -36,6 +36,7 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <sys/types.h>
+#include <sys/queue.h>
 #include <unistd.h>
 #include <linux/vhost.h>
 
@@ -61,6 +62,19 @@ struct buf_vector {
 	uint32_t desc_idx;
 };
 
+/*
+ * A structure to hold some fields needed in zero copy code path,
+ * mainly for associating an mbuf with the right desc_idx.
+ */
+struct zcopy_mbuf {
+	struct rte_mbuf *mbuf;
+	uint32_t desc_idx;
+	uint16_t in_use;
+
+	TAILQ_ENTRY(zcopy_mbuf) next;
+};
+TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);
+
 /**
  * Structure contains variables relevant to RX/TX virtqueues.
  */
@@ -85,6 +99,12 @@ struct vhost_virtqueue {
 
 	/* Physical address of used ring, for logging */
 	uint64_t		log_guest_addr;
+
+	uint16_t		nr_zmbuf;
+	uint16_t		zmbuf_size;
+	uint16_t		last_zmbuf_idx;
+	struct zcopy_mbuf	*zmbufs;
+	struct zcopy_mbuf_list	zmbuf_list;
 } __rte_cache_aligned;
 
 /* Old kernels have no such macro defined */
@@ -135,6 +155,7 @@ struct virtio_net {
 	/* to tell if we need broadcast rarp packet */
 	rte_atomic16_t		broadcast_rarp;
 	uint32_t		virt_qp_nb;
+	int			dequeue_zero_copy;
 	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
 #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
 	char			ifname[IF_NAME_SZ];
@@ -146,7 +167,6 @@ struct virtio_net {
 	uint32_t		nr_guest_pages;
 	uint32_t		max_guest_pages;
 	struct guest_page       *guest_pages;
-
 } __rte_cache_aligned;
 
 /**
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index a92377a..ac40408 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -180,7 +180,23 @@ static int
 vhost_user_set_vring_num(struct virtio_net *dev,
 			 struct vhost_vring_state *state)
 {
-	dev->virtqueue[state->index]->size = state->num;
+	struct vhost_virtqueue *vq = dev->virtqueue[state->index];
+
+	vq->size = state->num;
+
+	if (dev->dequeue_zero_copy) {
+		vq->nr_zmbuf = 0;
+		vq->last_zmbuf_idx = 0;
+		vq->zmbuf_size = vq->size;
+		vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
+					 sizeof(struct zcopy_mbuf), 0);
+		if (vq->zmbufs == NULL) {
+			RTE_LOG(WARNING, VHOST_CONFIG,
+				"failed to allocate mem for zero copy; "
+				"zero copy is forcibly disabled\n");
+			dev->dequeue_zero_copy = 0;
+		}
+	}
 
 	return 0;
 }
@@ -662,11 +678,32 @@ vhost_user_set_vring_kick(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 	vq->kickfd = file.fd;
 
 	if (virtio_is_ready(dev) && !(dev->flags & VIRTIO_DEV_RUNNING)) {
+		if (dev->dequeue_zero_copy) {
+			RTE_LOG(INFO, VHOST_CONFIG,
+				"Tx zero copy is enabled\n");
+		}
+
 		if (notify_ops->new_device(dev->vid) == 0)
 			dev->flags |= VIRTIO_DEV_RUNNING;
 	}
 }
 
+static void
+free_zmbufs(struct vhost_virtqueue *vq)
+{
+	struct zcopy_mbuf *zmbuf, *next;
+
+	for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
+	     zmbuf != NULL; zmbuf = next) {
+		next = TAILQ_NEXT(zmbuf, next);
+
+		rte_pktmbuf_free(zmbuf->mbuf);
+		TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+	}
+
+	rte_free(vq->zmbufs);
+}
+
 /*
  * when virtio is stopped, qemu will send us the GET_VRING_BASE message.
  */
@@ -695,6 +732,9 @@ vhost_user_get_vring_base(struct virtio_net *dev,
 
 	dev->virtqueue[state->index]->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
 
+	if (dev->dequeue_zero_copy)
+		free_zmbufs(dev->virtqueue[state->index]);
+
 	return 0;
 }
 
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 1c2ee47..215542c 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -678,6 +678,43 @@ make_rarp_packet(struct rte_mbuf *rarp_mbuf, const struct ether_addr *mac)
 	return 0;
 }
 
+static inline struct zcopy_mbuf *__attribute__((always_inline))
+get_zmbuf(struct vhost_virtqueue *vq)
+{
+	uint16_t i;
+	uint16_t last;
+	int tries = 0;
+
+	/* search [last_zmbuf_idx, zmbuf_size) */
+	i = vq->last_zmbuf_idx;
+	last = vq->zmbuf_size;
+
+again:
+	for (; i < last; i++) {
+		if (vq->zmbufs[i].in_use == 0) {
+			vq->last_zmbuf_idx = i + 1;
+			vq->zmbufs[i].in_use = 1;
+			return &vq->zmbufs[i];
+		}
+	}
+
+	tries++;
+	if (tries == 1) {
+		/* search [0, last_zmbuf_idx) */
+		i = 0;
+		last = vq->last_zmbuf_idx;
+		goto again;
+	}
+
+	return NULL;
+}
+
+static inline void __attribute__((always_inline))
+put_zmbuf(struct zcopy_mbuf *zmbuf)
+{
+	zmbuf->in_use = 0;
+}
+
 static inline int __attribute__((always_inline))
 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		  struct rte_mbuf *m, uint16_t desc_idx,
@@ -701,6 +738,27 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	if (unlikely(!desc_addr))
 		return -1;
 
+	if (unlikely(dev->dequeue_zero_copy)) {
+		struct zcopy_mbuf *zmbuf;
+
+		zmbuf = get_zmbuf(vq);
+		if (!zmbuf)
+			return -1;
+		zmbuf->mbuf = m;
+		zmbuf->desc_idx = desc_idx;
+
+		/*
+		 * Pin the mbuf; we will check later to see whether the
+		 * mbuf has been freed (i.e., we are the last user). If
+		 * that is the case, we can then safely update the used
+		 * ring.
+		 */
+		rte_mbuf_refcnt_update(m, 1);
+
+		vq->nr_zmbuf += 1;
+		TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
+	}
+
 	hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
 	rte_prefetch0(hdr);
 
@@ -732,10 +790,33 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	mbuf_offset = 0;
 	mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
 	while (1) {
+		uint64_t hpa;
+
 		cpy_len = RTE_MIN(desc_avail, mbuf_avail);
-		rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, mbuf_offset),
-			(void *)((uintptr_t)(desc_addr + desc_offset)),
-			cpy_len);
+
+		/*
+		 * A desc buf might span two host physical pages that are
+		 * not contiguous. In such a case (gpa_to_hpa returns 0),
+		 * the data will be copied even though zero copy is enabled.
+		 */
+		if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
+					desc->addr + desc_offset, cpy_len)))) {
+			cur->data_len = cpy_len;
+			cur->data_off = 0;
+			cur->buf_addr = (void *)(uintptr_t)desc_addr;
+			cur->buf_physaddr = hpa;
+
+			/*
+			 * In zero copy mode, one mbuf can only reference
+			 * data of one desc buf, or part of it.
+			 */
+			mbuf_avail = cpy_len;
+		} else {
+			rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
+							   mbuf_offset),
+				(void *)((uintptr_t)(desc_addr + desc_offset)),
+				cpy_len);
+		}
 
 		mbuf_avail  -= cpy_len;
 		mbuf_offset += cpy_len;
@@ -796,6 +877,49 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return 0;
 }
 
+static inline void __attribute__((always_inline))
+update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		 uint32_t used_idx, uint32_t desc_idx)
+{
+	vq->used->ring[used_idx].id  = desc_idx;
+	vq->used->ring[used_idx].len = 0;
+	vhost_log_used_vring(dev, vq,
+			offsetof(struct vring_used, ring[used_idx]),
+			sizeof(vq->used->ring[used_idx]));
+}
+
+static inline void __attribute__((always_inline))
+update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		uint32_t count)
+{
+	if (count == 0)
+		return;
+
+	rte_smp_wmb();
+	rte_smp_rmb();
+
+	vq->used->idx += count;
+	vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
+			sizeof(vq->used->idx));
+
+	/* Kick guest if required. */
+	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
+			&& (vq->callfd >= 0))
+		eventfd_write(vq->callfd, (eventfd_t)1);
+}
+
+static inline bool __attribute__((always_inline))
+mbuf_is_consumed(struct rte_mbuf *m)
+{
+	while (m) {
+		if (rte_mbuf_refcnt_read(m) > 1)
+			return false;
+		m = m->next;
+	}
+
+	return true;
+}
+
 uint16_t
 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
@@ -823,6 +947,30 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	if (unlikely(vq->enabled == 0))
 		return 0;
 
+	if (unlikely(dev->dequeue_zero_copy)) {
+		struct zcopy_mbuf *zmbuf, *next;
+		int nr_updated = 0;
+
+		for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
+		     zmbuf != NULL; zmbuf = next) {
+			next = TAILQ_NEXT(zmbuf, next);
+
+			if (mbuf_is_consumed(zmbuf->mbuf)) {
+				used_idx = vq->last_used_idx++ & (vq->size - 1);
+				update_used_ring(dev, vq, used_idx,
+						 zmbuf->desc_idx);
+				nr_updated += 1;
+
+				TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+				rte_pktmbuf_free(zmbuf->mbuf);
+				put_zmbuf(zmbuf);
+				vq->nr_zmbuf -= 1;
+			}
+		}
+
+		update_used_idx(dev, vq, nr_updated);
+	}
+
 	/*
 	 * Construct a RARP broadcast packet, and inject it to the "pkts"
 	 * array, to looks like that guest actually send such packet.
@@ -870,11 +1018,8 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 		used_idx  = (vq->last_used_idx  + i) & (vq->size - 1);
 		desc_indexes[i] = vq->avail->ring[avail_idx];
 
-		vq->used->ring[used_idx].id  = desc_indexes[i];
-		vq->used->ring[used_idx].len = 0;
-		vhost_log_used_vring(dev, vq,
-				offsetof(struct vring_used, ring[used_idx]),
-				sizeof(vq->used->ring[used_idx]));
+		if (likely(dev->dequeue_zero_copy == 0))
+			update_used_ring(dev, vq, used_idx, desc_indexes[i]);
 	}
 
 	/* Prefetch descriptor index. */
@@ -898,19 +1043,11 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 			break;
 		}
 	}
-
-	rte_smp_wmb();
-	rte_smp_rmb();
-	vq->used->idx += i;
 	vq->last_avail_idx += i;
 	vq->last_used_idx  += i;
-	vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
-			sizeof(vq->used->idx));
 
-	/* Kick guest if required. */
-	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
-			&& (vq->callfd >= 0))
-		eventfd_write(vq->callfd, (eventfd_t)1);
+	if (likely(dev->dequeue_zero_copy == 0))
+		update_used_idx(dev, vq, i);
 
 out:
 	if (unlikely(rarp_mbuf != NULL)) {
-- 
1.9.0

Thread overview: 75+ messages
2016-08-23  8:10 [dpdk-dev] [PATCH 0/6] vhost: add Tx zero copy support Yuanhan Liu
2016-08-23  8:10 ` [dpdk-dev] [PATCH 1/6] vhost: simplify memory regions handling Yuanhan Liu
2016-08-23  9:17   ` Maxime Coquelin
2016-08-24  7:26   ` Xu, Qian Q
2016-08-24  7:40     ` Yuanhan Liu
2016-08-24  7:36       ` Xu, Qian Q
2016-08-23  8:10 ` [dpdk-dev] [PATCH 2/6] vhost: get guest/host physical address mappings Yuanhan Liu
2016-08-23  9:58   ` Maxime Coquelin
2016-08-23 12:32     ` Yuanhan Liu
2016-08-23 13:25       ` Maxime Coquelin
2016-08-23 13:49         ` Yuanhan Liu
2016-08-23 14:05           ` Maxime Coquelin
2016-08-23  8:10 ` [dpdk-dev] [PATCH 3/6] vhost: introduce last avail idx for Tx Yuanhan Liu
2016-08-23 12:27   ` Maxime Coquelin
2016-08-23  8:10 ` [dpdk-dev] [PATCH 4/6] vhost: add Tx zero copy Yuanhan Liu
2016-08-23 14:04   ` Maxime Coquelin
2016-08-23 14:31     ` Yuanhan Liu
2016-08-23 15:40       ` Maxime Coquelin
2016-08-23  8:10 ` [dpdk-dev] [PATCH 5/6] vhost: add a flag to enable " Yuanhan Liu
2016-09-06  9:00   ` Xu, Qian Q
2016-09-06  9:42     ` Xu, Qian Q
2016-09-06 10:02       ` Yuanhan Liu
2016-09-07  2:43         ` Xu, Qian Q
2016-09-06  9:55     ` Yuanhan Liu
2016-09-07 16:00       ` Thomas Monjalon
2016-09-08  7:21         ` Yuanhan Liu
2016-09-08  7:57           ` Thomas Monjalon
2016-08-23  8:10 ` [dpdk-dev] [PATCH 6/6] examples/vhost: add an option " Yuanhan Liu
2016-08-23  9:31   ` Thomas Monjalon
2016-08-23 12:33     ` Yuanhan Liu
2016-08-23 14:14   ` Maxime Coquelin
2016-08-23 14:45     ` Yuanhan Liu
2016-08-23 14:18 ` [dpdk-dev] [PATCH 0/6] vhost: add Tx zero copy support Maxime Coquelin
2016-08-23 14:42   ` Yuanhan Liu
2016-08-23 14:53     ` Yuanhan Liu
2016-08-23 16:41       ` Maxime Coquelin
2016-08-29  8:32 ` Xu, Qian Q
2016-08-29  8:57   ` Xu, Qian Q
2016-09-23  4:11     ` Yuanhan Liu
2016-10-09 15:20   ` Yuanhan Liu
2016-09-23  4:13 ` [dpdk-dev] [PATCH v2 0/7] vhost: add dequeue " Yuanhan Liu
2016-09-23  4:13   ` [dpdk-dev] [PATCH v2 1/7] vhost: simplify memory regions handling Yuanhan Liu
2016-09-23  4:13   ` [dpdk-dev] [PATCH v2 2/7] vhost: get guest/host physical address mappings Yuanhan Liu
2016-09-26 20:17     ` Maxime Coquelin
2016-09-23  4:13   ` [dpdk-dev] [PATCH v2 3/7] vhost: introduce last avail idx for dequeue Yuanhan Liu
2016-09-23  4:13   ` Yuanhan Liu [this message]
2016-09-26 20:45     ` [dpdk-dev] [PATCH v2 4/7] vhost: add dequeue zero copy Maxime Coquelin
2016-10-06 14:37     ` Xu, Qian Q
2016-10-09  2:03       ` Yuanhan Liu
2016-10-10 10:12         ` Xu, Qian Q
2016-10-10 10:14           ` Maxime Coquelin
2016-10-10 10:22             ` Xu, Qian Q
2016-10-10 10:40               ` Xu, Qian Q
2016-10-10 11:48               ` Maxime Coquelin
2016-09-23  4:13   ` [dpdk-dev] [PATCH v2 5/7] vhost: add a flag to enable " Yuanhan Liu
2016-09-26 20:57     ` Maxime Coquelin
2016-09-23  4:13   ` [dpdk-dev] [PATCH v2 6/7] examples/vhost: add an option " Yuanhan Liu
2016-09-26 21:05     ` Maxime Coquelin
2016-09-23  4:13   ` [dpdk-dev] [PATCH v2 7/7] net/vhost: " Yuanhan Liu
2016-09-26 21:05     ` Maxime Coquelin
2016-10-09  7:27   ` [dpdk-dev] [PATCH v3 0/7] vhost: add dequeue zero copy support Yuanhan Liu
2016-10-09  7:27     ` [dpdk-dev] [PATCH v3 1/7] vhost: simplify memory regions handling Yuanhan Liu
2016-10-09  7:27     ` [dpdk-dev] [PATCH v3 2/7] vhost: get guest/host physical address mappings Yuanhan Liu
2016-11-29  3:10       ` linhaifeng
2016-11-29 13:14       ` linhaifeng
2016-10-09  7:27     ` [dpdk-dev] [PATCH v3 3/7] vhost: introduce last avail idx for dequeue Yuanhan Liu
2016-10-09  7:27     ` [dpdk-dev] [PATCH v3 4/7] vhost: add dequeue zero copy Yuanhan Liu
2016-10-09  7:27     ` [dpdk-dev] [PATCH v3 5/7] vhost: add a flag to enable " Yuanhan Liu
2016-10-09  7:27     ` [dpdk-dev] [PATCH v3 6/7] examples/vhost: add an option " Yuanhan Liu
2016-10-09  7:28     ` [dpdk-dev] [PATCH v3 7/7] net/vhost: " Yuanhan Liu
2016-10-11 13:04     ` [dpdk-dev] [PATCH v3 0/7] vhost: add dequeue zero copy support Xu, Qian Q
2016-10-12  7:48     ` Yuanhan Liu
2016-10-09 10:46 ` [dpdk-dev] [PATCH 0/6] vhost: add Tx " linhaifeng
2016-10-10  8:03   ` Yuanhan Liu
2016-10-14  7:30     ` linhaifeng
