MIME-Version: 1.0
References: <20230509060347.1237884-1-zhirun.yan@intel.com>
 <20230605111923.3772260-1-zhirun.yan@intel.com>
 <20230605111923.3772260-10-zhirun.yan@intel.com>
In-Reply-To: <20230605111923.3772260-10-zhirun.yan@intel.com>
From: Jerin Jacob <jerinjacobk@gmail.com>
Date: Mon, 5 Jun 2023 18:16:48 +0530
Message-ID: <CALBAE1Pn0U0PkgjCm4ogRiFY+0CTc21ziOTq7vA-Qcg9VE64KA@mail.gmail.com>
Subject: Re: [PATCH v7 09/17] graph: add structure for stream moving between
 cores
To: Zhirun Yan <zhirun.yan@intel.com>
Cc: dev@dpdk.org, jerinj@marvell.com, kirankumark@marvell.com, 
 ndabilpuram@marvell.com, stephen@networkplumber.org, pbhagavatula@marvell.com, 
 cunming.liang@intel.com, haiyue.wang@intel.com, mattias.ronnblom@ericsson.com
Content-Type: text/plain; charset="UTF-8"

On Mon, Jun 5, 2023 at 4:56 PM Zhirun Yan <zhirun.yan@intel.com> wrote:
>
> Add graph_mcore_dispatch_wq_node to hold graph scheduling workqueue
> node.
>
> Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
> Signed-off-by: Cunming Liang <cunming.liang@intel.com>
> Signed-off-by: Zhirun Yan <zhirun.yan@intel.com>
> ---
>  lib/graph/graph.c                   |  2 ++
>  lib/graph/graph_populate.c          |  1 +
>  lib/graph/graph_private.h           | 12 ++++++++++++
>  lib/graph/rte_graph_worker_common.h | 23 +++++++++++++++++++++++
>  4 files changed, 38 insertions(+)
>
> diff --git a/lib/graph/graph.c b/lib/graph/graph.c
> index 8ce87ae6da..9f107db425 100644
> --- a/lib/graph/graph.c
> +++ b/lib/graph/graph.c
> @@ -289,6 +289,7 @@ rte_graph_model_mcore_dispatch_core_bind(rte_graph_t id, int lcore)
>
>         RTE_ASSERT(graph->graph->model == RTE_GRAPH_MODEL_MCORE_DISPATCH);
>         graph->lcore_id = lcore;
> +       graph->graph->lcore_id = graph->lcore_id;
>         graph->socket = rte_lcore_to_socket_id(lcore);
>
>         /* check the availability of source node */
> @@ -312,6 +313,7 @@ rte_graph_model_mcore_dispatch_core_unbind(rte_graph_t id)
>                         break;
>
>         graph->lcore_id = RTE_MAX_LCORE;
> +       graph->graph->lcore_id = RTE_MAX_LCORE;
>
>  fail:
>         return;
> diff --git a/lib/graph/graph_populate.c b/lib/graph/graph_populate.c
> index 2c0844ce92..ed596a7711 100644
> --- a/lib/graph/graph_populate.c
> +++ b/lib/graph/graph_populate.c
> @@ -89,6 +89,7 @@ graph_nodes_populate(struct graph *_graph)
>                 }
>                 node->id = graph_node->node->id;
>                 node->parent_id = pid;
> +               node->dispatch.lcore_id = graph_node->node->lcore_id;
>                 nb_edges = graph_node->node->nb_edges;
>                 node->nb_edges = nb_edges;
>                 off += sizeof(struct rte_node);
> diff --git a/lib/graph/graph_private.h b/lib/graph/graph_private.h
> index 354dc8ac0a..d84174b667 100644
> --- a/lib/graph/graph_private.h
> +++ b/lib/graph/graph_private.h
> @@ -64,6 +64,18 @@ struct node {
>         char next_nodes[][RTE_NODE_NAMESIZE]; /**< Names of next nodes. */
>  };
>
> +/**
> + * @internal
> + *
> + * Structure that holds the graph scheduling workqueue node stream.
> + * Used for mcore dispatch model.
> + */
> +struct graph_mcore_dispatch_wq_node {
> +       rte_graph_off_t node_off;
> +       uint16_t nb_objs;
> +       void *objs[RTE_GRAPH_BURST_SIZE];
> +} __rte_cache_aligned;
> +
>  /**
>   * @internal
>   *
> diff --git a/lib/graph/rte_graph_worker_common.h b/lib/graph/rte_graph_worker_common.h
> index 72d132bae4..00bcf47ee8 100644
> --- a/lib/graph/rte_graph_worker_common.h
> +++ b/lib/graph/rte_graph_worker_common.h
> @@ -39,6 +39,13 @@ enum rte_graph_worker_model {
>         /**< Dispatch model to support cross-core dispatching within core affinity. */
>  };
>
> +/**
> + * @internal
> + *
> + * Singly-linked list head for graph schedule run-queue.
> + */
> +SLIST_HEAD(rte_graph_rq_head, rte_graph);
> +
>  /**
>   * @internal
>   *
> @@ -50,6 +57,15 @@ struct rte_graph {
>         uint32_t cir_mask;           /**< Circular buffer wrap around mask. */
>         rte_node_t nb_nodes;         /**< Number of nodes in the graph. */
>         rte_graph_off_t *cir_start;  /**< Pointer to circular buffer. */

Please add a comment here: "End of Fast path variables", so the fast-path fields are clearly delimited.
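Something along these lines (sketch only, using just the fields visible in this hunk):

        uint32_t cir_mask;           /**< Circular buffer wrap around mask. */
        rte_node_t nb_nodes;         /**< Number of nodes in the graph. */
        rte_graph_off_t *cir_start;  /**< Pointer to circular buffer. */
        /* End of Fast path variables. */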


> +       /* Graph schedule */
> +       struct rte_graph_rq_head *rq __rte_cache_aligned; /* The run-queue */
> +       struct rte_graph_rq_head rq_head; /* The head for run-queue list */
> +
> +       SLIST_ENTRY(rte_graph) rq_next;   /* The next for run-queue list */
> +       unsigned int lcore_id;  /**< The graph running Lcore. */
> +       struct rte_ring *wq;    /**< The work-queue for pending streams. */
> +       struct rte_mempool *mp; /**< The mempool for scheduling streams. */
> +       /* Graph schedule area */

Please move the above scheduling fields into the _dispatch_ union.
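I.e. group them the same way this patch already does for struct rte_node,
roughly like this (sketch only, reusing the fields from the hunk above):

        RTE_STD_C11
        union {
                /* Fast schedule area for mcore dispatch model */
                struct {
                        struct rte_graph_rq_head *rq __rte_cache_aligned; /* The run-queue */
                        struct rte_graph_rq_head rq_head; /* The head for run-queue list */
                        SLIST_ENTRY(rte_graph) rq_next;   /* The next for run-queue list */
                        unsigned int lcore_id;  /**< The graph running Lcore. */
                        struct rte_ring *wq;    /**< The work-queue for pending streams. */
                        struct rte_mempool *mp; /**< The mempool for scheduling streams. */
                } dispatch;
        };

That keeps all the model-specific state behind one name.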

>         rte_graph_off_t nodes_start; /**< Offset at which node memory starts. */
>         rte_graph_t id; /**< Graph identifier. */
>         int socket;     /**< Socket ID where memory is allocated. */
> @@ -84,6 +100,13 @@ struct rte_node {
>         /** Original process function when pcap is enabled. */
>         rte_node_process_t original_process;
>
> +       RTE_STD_C11
> +               union {
> +                       /* Fast schedule area for mcore dispatch model */
> +                       struct {
> +                               unsigned int lcore_id;  /**< Node running lcore. */
> +                       } dispatch;
> +               };
>         /* Fast path area  */
>  #define RTE_NODE_CTX_SZ 16
>         uint8_t ctx[RTE_NODE_CTX_SZ] __rte_cache_aligned; /**< Node Context. */
> --
> 2.37.2
>
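
For other reviewers, my reading of how graph_mcore_dispatch_wq_node is meant
to be used, as a rough producer-side sketch built only from the fields in this
patch (the helper name and error handling are mine, not the series'):

/* Hand a burst of objs destined for the node at 'off' to the graph bound to
 * another lcore. Illustrative only; assumes nb_objs <= RTE_GRAPH_BURST_SIZE. */
static int
dispatch_stream(struct rte_graph *dst_graph, rte_graph_off_t off,
                void **objs, uint16_t nb_objs)
{
        struct graph_mcore_dispatch_wq_node *wq_node;

        /* Grab a work-queue element from the scheduling mempool. */
        if (rte_mempool_get(dst_graph->mp, (void **)&wq_node) < 0)
                return -ENOMEM;

        wq_node->node_off = off;
        wq_node->nb_objs = nb_objs;
        memcpy(wq_node->objs, objs, nb_objs * sizeof(void *));

        /* Queue it for the lcore that owns dst_graph; on failure, return the
         * element so the stream can be retried. */
        if (rte_ring_enqueue(dst_graph->wq, wq_node) != 0) {
                rte_mempool_put(dst_graph->mp, wq_node);
                return -ENOBUFS;
        }

        return 0;
}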