From mboxrd@z Thu Jan 1 00:00:00 1970
From: Yasufumi Ogawa
Date: Wed, 6 Feb 2019 12:58:21 +0900
To: x-fn-spp@sl.ntt-tx.co.jp
Cc: ferruh.yigit@intel.com, spp@dpdk.org
In-Reply-To: <201902051147.x15BlgMc014227@imss03.silk.ntt-tx.co.jp>
References: <20190205114742.24502-1-x-fn-spp@sl.ntt-tx.co.jp> <201902051147.x15BlgMc014227@imss03.silk.ntt-tx.co.jp>
Subject: Re: [spp] [PATCH 2/5] spp_vf: change to handle multiples component

On 2019/02/05 20:47, x-fn-spp@sl.ntt-tx.co.jp wrote:
> From: Hideyuki Yamashita
>
> So far, slave_main assumes that only one component is assigned for one
> core. However there is a case when multiples component are assigned
> for one core in core-sharing usecase to reduce cpu core consumption.
> This patch changes to handle multiples component per core in
> slave_main function.

Could you revise the commit message? For example, "multiples component"
should be "multiple components", and "in core-sharing usecase" reads
better as "in the core-sharing use case".
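Also, just to confirm that I understand the core-sharing behaviour described
above: the slave loop is now expected to run every component assigned to the
lcore once per cycle, roughly like the toy model below. This is only a sketch
of the control flow I have in mind; all names in it are made up for
illustration and are not the actual spp_vf code.

/* Toy model only: not spp_vf code, all identifiers below are made up. */
#include <stdio.h>

#define MAX_COMPONENTS_PER_CORE 4

struct core_assignment {
	int num;                          /* number of components on this core */
	int ids[MAX_COMPONENTS_PER_CORE]; /* their component ids */
};

/* Stand-in for one cycle of a component (e.g. spp_classifier_mac_do()). */
static void
do_component_once(int id)
{
	printf("run component %d for one cycle\n", id);
}

int
main(void)
{
	struct core_assignment core = { .num = 2, .ids = { 3, 5 } };
	int cycle, cnt;

	/* The real loop would run while the core status allows forwarding;
	 * three cycles are enough to show the shape of the control flow.
	 */
	for (cycle = 0; cycle < 3; cycle++)
		for (cnt = 0; cnt < core.num; cnt++)
			do_component_once(core.ids[cnt]);

	return 0;
}

If that matches the intent, it would be good to state it explicitly in the
commit message as well.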
>
> Signed-off-by: Hideyuki Yamashita
> Signed-off-by: Naoki Takada
> ---
>  src/vf/classifier_mac.c | 112 +++++++++++++++-------------------------
>  1 file changed, 42 insertions(+), 70 deletions(-)
>
> diff --git a/src/vf/classifier_mac.c b/src/vf/classifier_mac.c
> index cfaf96a..27e2355 100644
> --- a/src/vf/classifier_mac.c
> +++ b/src/vf/classifier_mac.c
> @@ -1,5 +1,5 @@
>  /* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2017-2018 Nippon Telegraph and Telephone Corporation
> + * Copyright(c) 2017-2019 Nippon Telegraph and Telephone Corporation
>   */
>
>  #include
> @@ -774,7 +774,7 @@ classify_packet(struct rte_mbuf **rx_pkts, uint16_t n_rx,
>
>  /* change update index at classifier management information */
>  static inline void
> -change_update_index(struct management_info *mng_info, int id)
> +change_classifier_index(struct management_info *mng_info, int id)
>  {
>  	if (unlikely(mng_info->ref_index ==
>  			mng_info->upd_index)) {
> @@ -815,11 +815,6 @@ spp_classifier_mac_update(struct spp_component_info *component_info)
>  	RTE_LOG(INFO, SPP_CLASSIFIER_MAC,
>  			"Component[%u] Start update component.\n", id);
>
> -	/* wait until no longer access the new update side */
> -	while (likely(mng_info->ref_index ==
> -			mng_info->upd_index))
> -		rte_delay_us_block(CHANGE_UPDATE_INDEX_WAIT_INTERVAL);
> -
>  	cmp_info = mng_info->cmp_infos + mng_info->upd_index;
>
>  	/* initialize update side classifier information */
> @@ -833,6 +828,7 @@ spp_classifier_mac_update(struct spp_component_info *component_info)
>
>  	/* change index of reference side */
>  	mng_info->upd_index = mng_info->ref_index;
> +	mng_info->is_used = 1;
>
>  	/* wait until no longer access the new update side */
>  	while (likely(mng_info->ref_index ==
> @@ -852,10 +848,8 @@
>  int
>  spp_classifier_mac_do(int id)
>  {
> -	int ret = SPP_RET_NG;
>  	int i;
>  	int n_rx;
> -	unsigned int lcore_id = rte_lcore_id();
>  	struct management_info *mng_info = g_mng_infos + id;
>  	struct component_info *cmp_info = NULL;
>  	struct rte_mbuf *rx_pkts[MAX_PKT_BURST];
> @@ -867,76 +861,54 @@ spp_classifier_mac_do(int id)
>  	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
>  			US_PER_S * DRAIN_TX_PACKET_INTERVAL;
>
> -	/* initialize */
> -	ret = init_classifier(mng_info);
> -	if (unlikely(ret != SPP_RET_OK)) {
> -		uninit_classifier(mng_info);
> -		return ret;
> -	}
> -
> -	while (likely(spp_get_core_status(lcore_id) == SPP_CORE_FORWARD) &&
> -			likely(spp_check_core_update(lcore_id) == SPP_RET_NG)) {
> -		/* change index of update side */
> -		change_update_index(mng_info, id);
> +	/* change index of update classifier management information */
> +	change_classifier_index(mng_info, id);
>
> -		/**
> -		 * decide classifier information of the current cycle
> -		 * If at least, one rx port, one tx port and one
> -		 * classifier_table exist, then start classifying.
> -		 * If not, stop classifying.
> -		 */
> -		cmp_info = mng_info->cmp_infos + mng_info->ref_index;
> -		clsd_data_rx = &cmp_info->classified_data_rx;
> -		clsd_data_tx = cmp_info->classified_data_tx;
> +	/**
> +	 * decide classifier information of the current cycle If at least,
> +	 * one rx port, one tx port and one classifier_table exist, then start
> +	 * classifying. If not, stop classifying.
> +	 */

I think the condition check is not in this block but in the next one, so
this comment should be placed on that block instead.
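To be concrete, what I have in mind is to keep a single comment right above
the condition in the new code, for example like this. It is just a rough
suggestion built from the lines of your patch, with approximate indentation:

	cmp_info = mng_info->cmp_infos + mng_info->ref_index;
	clsd_data_rx = &cmp_info->classified_data_rx;
	clsd_data_tx = cmp_info->classified_data_tx;

	/**
	 * Decide whether to classify in this cycle. Classify only if at
	 * least one rx port, one tx port and one classifier_table entry
	 * exist; otherwise return without classifying.
	 */
	if (!(clsd_data_rx->iface_type != UNDEF &&
			cmp_info->n_classified_data_tx >= 1 &&
			cmp_info->mac_addr_entry == 1))
		return SPP_RET_OK;

Then the "Perform condition check" comment in the next hunk becomes
unnecessary, which is what my next comment is about.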
> +	cmp_info = mng_info->cmp_infos + mng_info->ref_index;
> +	clsd_data_rx = &cmp_info->classified_data_rx;
> +	clsd_data_tx = cmp_info->classified_data_tx;
>
> -		/**
> -		 * Perform condition check if reception/transmission
> -		 * of packet should be done or not
> -		 */
> -		if (!(clsd_data_rx->iface_type != UNDEF &&
> -				cmp_info->n_classified_data_tx >= 1 &&

This comment is of no use if the previous one is moved here.

> +	/**
> +	 * Perform condition check if reception/transmission
> +	 * of packet should be done or not
> +	 */
> +	if (!(clsd_data_rx->iface_type != UNDEF &&
> +			cmp_info->n_classified_data_tx >= 1 &&
>  			cmp_info->mac_addr_entry == 1))
> -			continue;
> +		return SPP_RET_OK;
>
> -		/* drain tx packets, if buffer is not filled for interval */
> -		cur_tsc = rte_rdtsc();
> -		if (unlikely(cur_tsc - prev_tsc > drain_tsc)) {
> -			for (i = 0; i < cmp_info->n_classified_data_tx;
> -					i++) {
> -				if (likely(clsd_data_tx[i].num_pkt == 0))
> -					continue;
> -
> -				RTE_LOG(DEBUG, SPP_CLASSIFIER_MAC,
> -						"transmit packets (drain). "
> -						"index=%d, "
> -						"num_pkt=%hu, "
> -						"interval=%lu\n",
> -						i,
> -						clsd_data_tx[i].num_pkt,
> -						cur_tsc - prev_tsc);
> +	/* drain tx packets, if buffer is not filled for interval */
> +	cur_tsc = rte_rdtsc();
> +	if (unlikely(cur_tsc - prev_tsc > drain_tsc)) {
> +		for (i = 0; i < cmp_info->n_classified_data_tx; i++) {
> +			if (likely(clsd_data_tx[i].num_pkt == 0))
> +				continue;
> +
> +			RTE_LOG(DEBUG, SPP_CLASSIFIER_MAC,
> +					"transmit packets (drain). index=%d, "
> +					"num_pkt=%hu, interval=%lu\n",
> +					i, clsd_data_tx[i].num_pkt,
> +					cur_tsc - prev_tsc);
>  			transmit_packet(&clsd_data_tx[i]);
> -			}
> -			prev_tsc = cur_tsc;
>  		}
> -
> -		if (clsd_data_rx->iface_type == UNDEF)
> -			continue;
> -
> -		/* retrieve packets */
> -		n_rx = spp_eth_rx_burst(clsd_data_rx->port, 0,
> -				rx_pkts, MAX_PKT_BURST);
> -		if (unlikely(n_rx == 0))
> -			continue;
> -
> -		/* classify and transmit (filled) */
> -		classify_packet(rx_pkts, n_rx, cmp_info, clsd_data_tx);
> +		prev_tsc = cur_tsc;
>  	}
>
> -	/* just in case */
> -	change_update_index(mng_info, id);
> +	if (clsd_data_rx->iface_type == UNDEF)
> +		return SPP_RET_OK;
> +
> +	/* retrieve packets */
> +	n_rx = spp_eth_rx_burst(clsd_data_rx->port, 0, rx_pkts, MAX_PKT_BURST);
> +	if (unlikely(n_rx == 0))
> +		return SPP_RET_OK;
>
> -	/* uninitialize */
> -	uninit_classifier(mng_info);
> +	/* classify and interval that transmit burst packet */
> +	classify_packet(rx_pkts, n_rx, cmp_info, clsd_data_tx);
>
>  	return SPP_RET_OK;
>  }

-- 
Yasufumi Ogawa
NTT Network Service Systems Labs