Soft Patch Panel
* [spp] [PATCH 0/3] Add getting lcores feature
@ 2019-01-31  3:05 ogawa.yasufumi
  2019-01-31  3:05 ` [spp] [PATCH 1/3] spp_primary: add lcores in status info ogawa.yasufumi
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: ogawa.yasufumi @ 2019-01-31  3:05 UTC (permalink / raw)
  To: ferruh.yigit, spp, ogawa.yasufumi

From: Yasufumi Ogawa <ogawa.yasufumi@lab.ntt.co.jp>

How to assign cores is up to the user, but mistakes are easy to make
because the assignment can become complex if there are many cores and
several DPDK processes running. The user should be able to check the
current core usage while deciding the core assignment.

This series of patches adds a feature for inspecting the core usage of
spp_primary. You can get the core usage from spp-ctl or SPP CLI as a
part of the status info.

  spp > pri; status
  - lcores:
    - [0, 1]
  - physical ports:
      ID          rx          tx     tx_drop  mac_addr
       0           0           0           0  56:48:4f:53:54:00
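
The same status should also be retrievable as JSON directly from
spp-ctl. The endpoint path below is an assumption for illustration,
following the style of the `/v1/cpus` API shown later in this letter.

  $ curl http://192.168.1.100:7777/v1/primary/status | jq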

The CPU layout, that is, the logical and physical cores on each socket,
can also be retrieved from spp-ctl. It cannot be referred to from SPP
CLI currently. Here is an example of getting the CPU layout of a
single-socket machine. `jq` is a command for pretty-printing JSON data.

  $ curl http://192.168.1.100:7777/v1/cpus | jq
  [
    {
      "cores": [
        {
          "cpus": [
            1
          ],
          "core_id": 1
        },
        ...
      ],
      "socket_id": 0
    }
  ]
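
As a rough sketch of how the layout might be consumed programmatically
(not part of this series; the host address and the Python `requests`
package are assumptions for illustration):

  # List the logical CPUs found on each socket via the /v1/cpus API.
  import requests

  layout = requests.get('http://192.168.1.100:7777/v1/cpus').json()
  for sock in layout:
      cpus = sorted(c for core in sock['cores'] for c in core['cpus'])
      print('socket {}: cpus {}'.format(sock['socket_id'], cpus))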

The rest of the SPP processes will be supported in a future update.


Yasufumi Ogawa (3):
  spp_primary: add lcores in status info
  controller: add lcores in status command of pri
  spp-ctl: add API for getting CPU layout

 src/controller/commands/pri.py | 33 ++++++++++++++++++++-------------
 src/primary/init.h             |  2 ++
 src/primary/main.c             | 34 +++++++++++++++++++++++++++-------
 src/spp-ctl/spp_ctl.py         | 25 +++++++++++++++++++++++++
 src/spp-ctl/spp_webapi.py      |  4 ++++
 5 files changed, 78 insertions(+), 20 deletions(-)

-- 
2.7.4

* [spp] [PATCH 1/3] spp_primary: add lcores in status info
  2019-01-31  3:05 [spp] [PATCH 0/3] Add getting lcores feature ogawa.yasufumi
@ 2019-01-31  3:05 ` ogawa.yasufumi
  2019-01-31  3:05 ` [spp] [PATCH 2/3] controller: add lcores in status command of pri ogawa.yasufumi
  2019-01-31  3:05 ` [spp] [PATCH 3/3] spp-ctl: add API for getting CPU layout ogawa.yasufumi
  2 siblings, 0 replies; 4+ messages in thread
From: ogawa.yasufumi @ 2019-01-31  3:05 UTC (permalink / raw)
  To: ferruh.yigit, spp, ogawa.yasufumi

From: Yasufumi Ogawa <ogawa.yasufumi@lab.ntt.co.jp>

This patch adds the lcores used by spp_primary to the result of the
status command. They are contained as a list referred to with the key
`lcores`.

   {
       "lcores": [0, 1],
       "ring_ports": [
           {
               "id": 0,
               "rx": 0,
               "rx_drop": 0,
               "tx": 0,
               "tx_drop": 0
           },
           ...
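
A minimal client-side sketch of reading the new key (illustration only,
not part of the patch; the status string is hard-coded here):

    import json

    # `raw_status` stands in for the JSON received from spp_primary.
    raw_status = '{"lcores": [0, 1], "ring_ports": []}'
    status = json.loads(raw_status)
    print('lcores used by spp_primary:', status.get('lcores', []))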

Signed-off-by: Yasufumi Ogawa <ogawa.yasufumi@lab.ntt.co.jp>
---
 src/primary/init.h |  2 ++
 src/primary/main.c | 34 +++++++++++++++++++++++++++-------
 2 files changed, 29 insertions(+), 7 deletions(-)

diff --git a/src/primary/init.h b/src/primary/init.h
index 3ea69f0..6344377 100644
--- a/src/primary/init.h
+++ b/src/primary/init.h
@@ -38,6 +38,8 @@ struct ring_port {
 	} stats;
 };
 
+extern uint8_t lcore_id_used[RTE_MAX_LCORE];
+
 extern struct ring_port *ring_ports;
 
 /* the shared port information: port numbers, rx and tx stats etc. */
diff --git a/src/primary/main.c b/src/primary/main.c
index 9185e3d..c86e9ef 100644
--- a/src/primary/main.c
+++ b/src/primary/main.c
@@ -21,8 +21,9 @@
  * Buffer sizes of status message of primary. Total number of size
  * must be equal to MSG_SIZE 2048 defined in `shared/common.h`.
  */
+#define PRI_BUF_SIZE_LCORE 128
 #define PRI_BUF_SIZE_PHY 512
-#define PRI_BUF_SIZE_RING 1512
+#define PRI_BUF_SIZE_RING (MSG_SIZE - PRI_BUF_SIZE_LCORE - PRI_BUF_SIZE_PHY)
 
 #define SPP_PATH_LEN 1024  /* seems enough for path of spp procs */
 #define NOF_TOKENS 48  /* seems enough to contain tokens */
@@ -43,6 +44,9 @@ static enum cmd_type cmd = STOP;
 
 static struct pollfd pfd;
 
+/* global var for lcore IDs used by spp_primary - extern in header */
+uint8_t lcore_id_used[RTE_MAX_LCORE];
+
 static void
 turn_off(int sig)
 {
@@ -306,6 +310,7 @@ launch_sec_proc(char *sec_name, int sec_id, char **sec_args)
  * Here is an exmaple.
  *
  * {
+ *     "lcores": [0],
  *     "ring_ports": [
  *     {
  *         "id": 0,
@@ -332,15 +337,30 @@ static int
 get_status_json(char *str)
 {
 	int i;
+	int lcore_buf_size = PRI_BUF_SIZE_LCORE;
 	int phyp_buf_size = PRI_BUF_SIZE_PHY;
 	int ringp_buf_size = PRI_BUF_SIZE_RING;
+	char lcore_ids[PRI_BUF_SIZE_LCORE];
 	char phy_ports[phyp_buf_size];
 	char ring_ports[ringp_buf_size];
+	memset(lcore_ids, '\0', lcore_buf_size);
 	memset(phy_ports, '\0', phyp_buf_size);
 	memset(ring_ports, '\0', ringp_buf_size);
 
-	int buf_size = 256;
+	int buf_size = 256;  /* size of temp buffer */
+	char lcore_id[108];  /* seems enough */
 	char phy_port[buf_size];
+	char ring_port[buf_size];
+
+	memset(lcore_id, '\0', sizeof(lcore_id));
+	for (i = 0; i < RTE_MAX_LCORE; i++) {
+		if (lcore_id_used[i] == 1)
+			sprintf(lcore_id + strlen(lcore_id), "%d,", i);
+	}
+	sprintf(lcore_id + strlen(lcore_id) - 1, "%s", "");  /* cut trailing ',' */
+
+	sprintf(lcore_ids, "\"lcores\":[%s]", lcore_id);
+
 	for (i = 0; i < ports->num_ports; i++) {
 
 		RTE_LOG(DEBUG, PRIMARY, "Size of phy_ports str: %d\n",
@@ -373,7 +393,6 @@ get_status_json(char *str)
 			sprintf(phy_ports, "%s,", phy_ports);
 	}
 
-	char ring_port[buf_size];
 	for (i = 0; i < num_rings; i++) {
 
 		RTE_LOG(DEBUG, PRIMARY, "Size of ring_ports str: %d\n",
@@ -406,11 +425,12 @@ get_status_json(char *str)
 			sprintf(ring_ports, "%s,", ring_ports);
 	}
 
-	RTE_LOG(DEBUG, PRIMARY, "{\"phy_ports\": [%s], \"ring_ports\": [%s]}",
-			phy_ports, ring_ports);
+	RTE_LOG(DEBUG, PRIMARY,
+			"{%s, \"phy_ports\": [%s], \"ring_ports\": [%s]}\n",
+			lcore_ids, phy_ports, ring_ports);
 
-	sprintf(str, "{\"phy_ports\": [%s], \"ring_ports\": [%s]}",
-			phy_ports, ring_ports);
+	sprintf(str, "{%s, \"phy_ports\": [%s], \"ring_ports\": [%s]}",
+			lcore_ids, phy_ports, ring_ports);
 
 	return 0;
 }
-- 
2.7.4

* [spp] [PATCH 2/3] controller: add lcores in status command of pri
  2019-01-31  3:05 [spp] [PATCH 0/3] Add getting lcores feature ogawa.yasufumi
  2019-01-31  3:05 ` [spp] [PATCH 1/3] spp_primary: add lcores in status info ogawa.yasufumi
@ 2019-01-31  3:05 ` ogawa.yasufumi
  2019-01-31  3:05 ` [spp] [PATCH 3/3] spp-ctl: add API for getting CPU layout ogawa.yasufumi
  2 siblings, 0 replies; 4+ messages in thread
From: ogawa.yasufumi @ 2019-01-31  3:05 UTC (permalink / raw)
  To: ferruh.yigit, spp, ogawa.yasufumi

From: Yasufumi Ogawa <ogawa.yasufumi@lab.ntt.co.jp>

This update shows the lcores in the result of the status command of pri
in SPP CLI. The format of the message is also changed to be similar to
that of the other processes. Here is an example using lcores 0 and 3.

    - lcores:
        [0, 3]
    - physical ports:
        ID          rx          tx     tx_drop  mac_addr
         0    78932932    78932931           1  56:48:4f:53:54:00
    - ring ports:
        ID          rx          tx     rx_drop     tx_drop
         0       89283       89283           0           0
         ...

Signed-off-by: Yasufumi Ogawa <ogawa.yasufumi@lab.ntt.co.jp>
---
 src/controller/commands/pri.py | 33 ++++++++++++++++++++-------------
 1 file changed, 20 insertions(+), 13 deletions(-)

diff --git a/src/controller/commands/pri.py b/src/controller/commands/pri.py
index 662e41a..1f60ece 100644
--- a/src/controller/commands/pri.py
+++ b/src/controller/commands/pri.py
@@ -83,6 +83,7 @@ class SppPrimary(object):
         long.
 
             {
+                "lcores": [0, 3],
                 "phy_ports": [
                     {
                         "eth": "56:48:4f:12:34:00",
@@ -107,28 +108,34 @@ class SppPrimary(object):
 
         It is formatted to be simple and more understandable.
 
-            Physical Ports:
-              ID          rx          tx     tx_drop  mac_addr
-               0    78932932    78932931           1  56:48:4f:53:54:00
-            Ring Ports:
-              ID          rx          tx     rx_drop     rx_drop
-               0       89283       89283           0           0
-               ...
+            - lcores:
+                [0, 3]
+            - physical ports:
+                ID          rx          tx     tx_drop  mac_addr
+                 0    78932932    78932931           1  56:48:4f:53:54:00
+            - ring ports:
+                ID          rx          tx     rx_drop     tx_drop
+                 0       89283       89283           0           0
+                 ...
         """
 
+        if 'lcores' in json_obj:
+            print('- lcores:')
+            print('  - {}'.format(json_obj['lcores']))
+
         if 'phy_ports' in json_obj:
-            print('Physical Ports:')
-            print('  ID          rx          tx     tx_drop  mac_addr')
+            print('- physical ports:')
+            print('    ID          rx          tx     tx_drop  mac_addr')
             for pports in json_obj['phy_ports']:
-                print('  %2d  %10d  %10d  %10d  %s' % (
+                print('    %2d  %10d  %10d  %10d  %s' % (
                     pports['id'], pports['rx'],  pports['tx'],
                     pports['tx_drop'], pports['eth']))
 
         if 'ring_ports' in json_obj:
-            print('Ring Ports:')
-            print('  ID          rx          tx     rx_drop     rx_drop')
+            print('- ring ports:')
+            print('    ID          rx          tx     rx_drop     tx_drop')
             for rports in json_obj['ring_ports']:
-                print('  %2d  %10d  %10d  %10d  %10d' % (
+                print('    %2d  %10d  %10d  %10d  %10d' % (
                     rports['id'], rports['rx'],  rports['tx'],
                     rports['rx_drop'], rports['tx_drop']))
 
-- 
2.7.4

* [spp] [PATCH 3/3] spp-ctl: add API for getting CPU layout
  2019-01-31  3:05 [spp] [PATCH 0/3] Add getting lcores feature ogawa.yasufumi
  2019-01-31  3:05 ` [spp] [PATCH 1/3] spp_primary: add lcores in status info ogawa.yasufumi
  2019-01-31  3:05 ` [spp] [PATCH 2/3] controller: add lcores in status command of pri ogawa.yasufumi
@ 2019-01-31  3:05 ` ogawa.yasufumi
  2 siblings, 0 replies; 4+ messages in thread
From: ogawa.yasufumi @ 2019-01-31  3:05 UTC (permalink / raw)
  To: ferruh.yigit, spp, ogawa.yasufumi

From: Yasufumi Ogawa <ogawa.yasufumi@lab.ntt.co.jp>

To help the user consider core assignment, add a REST API for getting
the CPU layout. It calls the helper tool `cpu_layout.py` and returns the
result in JSON. Here is an example.

  [
    {
      "socket_id": 0,
      "cores": [
        {"core_id": 1, "cpus": [1, 5]},
        {"core_id": 2, "cpus": [2, 6]},
        ...
      ]
    },
    {
      "socket_id": 1,
      "cores": [
        ...
      ]
    },
  ]

Signed-off-by: Yasufumi Ogawa <ogawa.yasufumi@lab.ntt.co.jp>
---
 src/spp-ctl/spp_ctl.py    | 25 +++++++++++++++++++++++++
 src/spp-ctl/spp_webapi.py |  4 ++++
 2 files changed, 29 insertions(+)

diff --git a/src/spp-ctl/spp_ctl.py b/src/spp-ctl/spp_ctl.py
index c4dd4b2..f276442 100644
--- a/src/spp-ctl/spp_ctl.py
+++ b/src/spp-ctl/spp_ctl.py
@@ -6,7 +6,9 @@ eventlet.monkey_patch()
 
 import argparse
 import errno
+import json
 import logging
+import os
 import socket
 import subprocess
 
@@ -19,6 +21,8 @@ LOG = logging.getLogger(__name__)
 
 MSG_SIZE = 4096
 
+# relative path of `cpu_layout.py`
+CPU_LAYOUT_TOOL = 'tools/helpers/cpu_layout.py'
 
 class Controller(object):
 
@@ -142,6 +146,27 @@ class Controller(object):
             procs.append(p)
         return procs
 
+    def get_cpu_layout(self):
+        """Get cpu layout with helper tool 'cpu_layout.py'."""
+
+        # This script is 'src/spp-ctl/spp_ctl.py' and it expects to find
+        # the tool at 'tools/helpers/cpu_layout.py'.
+        cmd_path = "{}/../../{}".format(
+                os.path.dirname(__file__), CPU_LAYOUT_TOOL)
+
+        if os.path.exists(cmd_path):
+            # Get cpu layout as bytes of a JSON formatted string
+            cmd_res = subprocess.check_output(
+                    [cmd_path, '--json'],  # required '--json' option
+                    stderr=subprocess.STDOUT)
+
+            # Decode bytes to str
+            return json.loads(cmd_res.decode('utf-8'))
+
+        else:
+            LOG.error("'{}' cannot be found.".format(CPU_LAYOUT_TOOL))
+            return None
+
     def do_exit(self, proc_type, proc_id):
         removed_id = None  # remove proc info of ID from self.procs
         for proc in self.procs.values():
diff --git a/src/spp-ctl/spp_webapi.py b/src/spp-ctl/spp_webapi.py
index 10b4098..39530dd 100644
--- a/src/spp-ctl/spp_webapi.py
+++ b/src/spp-ctl/spp_webapi.py
@@ -152,11 +152,15 @@ class V1Handler(BaseHandler):
 
     def set_route(self):
         self.route('/processes', 'GET', callback=self.get_processes)
+        self.route('/cpus', 'GET', callback=self.get_cpu_layout)
 
     def get_processes(self):
         LOG.info("get processes called.")
         return self.ctrl.get_processes()
 
+    def get_cpu_layout(self):
+        LOG.info("get cpu layout called.")
+        return self.ctrl.get_cpu_layout()
 
 class V1VFCommon(object):
     """Define common methods for vf and mirror handler."""
-- 
2.7.4
