Date: Wed, 14 Sep 2016 14:20:21 +0800
From: Yuanhan Liu
To: Zhiyong Yang
Cc: dev@dpdk.org, thomas.monjalon@6wind.com, pmatilai@redhat.com
Message-ID: <20160914062021.GZ23158@yliu-dev.sh.intel.com>
In-Reply-To: <1473408927-40364-1-git-send-email-zhiyong.yang@intel.com>
Subject: Re: [dpdk-dev] [PATCH v2] net/vhost: add pmd xstats

On Fri, Sep 09, 2016 at 04:15:27PM +0800, Zhiyong Yang wrote:
> +struct vhost_xstats {
> +	uint64_t stat[16];
> +};
> +
>  struct vhost_queue {
>  	int vid;
>  	rte_atomic32_t allow_queuing;
> @@ -85,7 +89,8 @@ struct vhost_queue {
>  	uint64_t missed_pkts;
>  	uint64_t rx_bytes;
>  	uint64_t tx_bytes;

I'd suggest putting those statistic counters into a vhost_stats struct,
which would simplify the xstats_reset code a bit. And please do it in
two patches: one to introduce vhost_stats, another one to add xstats.
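Something like the following untested sketch (the field names here are
just for illustration):

struct vhost_stats {
	uint64_t pkts;
	uint64_t bytes;
	uint64_t missed_pkts;
	uint64_t xstats[16];
};

A queue is either Rx or Tx in this driver, so the rx_/tx_ counter pairs
could collapse into one, and the reset becomes a single memset per
queue:

static void
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct vhost_queue *vq;
	unsigned int i;

	/* assuming vhost_queue embeds "struct vhost_stats stats" */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		vq = dev->data->rx_queues[i];
		if (vq)
			memset(&vq->stats, 0, sizeof(vq->stats));
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		vq = dev->data->tx_queues[i];
		if (vq)
			memset(&vq->stats, 0, sizeof(vq->stats));
	}
}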
> -};
> +	struct vhost_xstats xstats;
> +	};

A format issue here: the closing brace should not be indented.

>
>  struct pmd_internal {
>  	char *dev_name;
> @@ -127,6 +132,274 @@ struct rte_vhost_vring_state {
>
>  static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
>
> +enum rte_vhostqueue_rxtx {

Don't use the "rte_" prefix for internal functions, vars, etc. The
"rte_" prefix is reserved for those that will be exported for public
use.

> +	RTE_VHOSTQUEUE_RX = 0,
> +	RTE_VHOSTQUEUE_TX = 1
> +};
> +
> +#define RTE_ETH_VHOST_XSTATS_NAME_SIZE 64

ditto.

> +
> +struct rte_vhost_xstats_name_off {

ditto.

> +	char name[RTE_ETH_VHOST_XSTATS_NAME_SIZE];
> +	uint64_t offset;
> +};
> +
> +/* [rt]_qX_ is prepended to the name string here */
> +static void
> +vhost_dev_xstats_reset(struct rte_eth_dev *dev)
> +{
> +	struct vhost_queue *vqrx = NULL;
> +	struct vhost_queue *vqtx = NULL;
> +	unsigned int i = 0;
> +
> +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +		if (!dev->data->rx_queues[i])
> +			continue;
> +		vqrx = (struct vhost_queue *)dev->data->rx_queues[i];

Unnecessary cast.

> +		vqrx->rx_pkts = 0;
> +		vqrx->rx_bytes = 0;
> +		vqrx->missed_pkts = 0;
> +		memset(&vqrx->xstats, 0, sizeof(vqrx->xstats));
> +	}
> +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> +		if (!dev->data->tx_queues[i])
> +			continue;
> +		vqtx = (struct vhost_queue *)dev->data->tx_queues[i];
> +		vqtx->tx_pkts = 0;
> +		vqtx->tx_bytes = 0;
> +		vqtx->missed_pkts = 0;
> +		memset(&vqtx->xstats, 0, sizeof(vqtx->xstats));
> +	}
> +}
> +
> +static int
> +vhost_dev_xstats_get_names(struct rte_eth_dev *dev,
> +			   struct rte_eth_xstat_name *xstats_names,
> +			   __rte_unused unsigned int limit)

The typical way is to put __rte_unused after the keyword:
"unsigned int limit __rte_unused".

> +{
> +	unsigned int i = 0;
> +	unsigned int t = 0;
> +	int count = 0;
> +	int nstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
> +			+ dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
> +
> +	if (xstats_names) {

I know you are following the virtio pmd style, but you don't have to.
I'd suggest returning early for the (!xstats_names) case; then we could
save one indentation level for the following code block.
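An untested sketch of what I mean (the loop bodies stay the same, just
one indentation level shallower; I have also renamed the stat string
table here per the "rte_" prefix comment above):

	if (!xstats_names)
		return nstats;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct vhost_queue *rxvq = dev->data->rx_queues[i];

		if (!rxvq)
			continue;
		for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "rx_q%u_%s", i,
				 vhost_rxq_stat_strings[t].name);
			count++;
		}
	}
	/* ... same loop for the Tx queues ... */

	return count;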
> +		for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +			struct vhost_queue *rxvq = dev->data->rx_queues[i];
> +
> +			if (!rxvq)
> +				continue;
> +			for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
> +				snprintf(xstats_names[count].name,
> +					 sizeof(xstats_names[count].name),
> +					 "rx_q%u_%s", i,
> +					 rte_vhost_rxq_stat_strings[t].name);
> +				count++;
> +			}
> +		}
> +		for (i = 0; i < dev->data->nb_tx_queues; i++) {
> +			struct vhost_queue *txvq = dev->data->tx_queues[i];
> +
> +			if (!txvq)
> +				continue;
> +			for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
> +				snprintf(xstats_names[count].name,
> +					 sizeof(xstats_names[count].name),
> +					 "tx_q%u_%s", i,
> +					 rte_vhost_txq_stat_strings[t].name);
> +				count++;
> +			}
> +		}
> +		return count;
> +	}
> +	return nstats;
> +}
> +
> +static int
> +vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
> +		     unsigned int n)
> +{
> +	unsigned int i;
> +	unsigned int t;
> +	unsigned int count = 0;
> +
> +	unsigned int nxstats = dev->data->nb_rx_queues * VHOST_NB_RXQ_XSTATS
> +			+ dev->data->nb_tx_queues * VHOST_NB_TXQ_XSTATS;
> +
> +	if (n < nxstats)
> +		return nxstats;
> +
> +	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +		struct vhost_queue *rxvq =
> +			(struct vhost_queue *)dev->data->rx_queues[i];

Unnecessary cast.

> +
> +		if (!rxvq)
> +			continue;
> +
> +		for (t = 0; t < VHOST_NB_RXQ_XSTATS; t++) {
> +			xstats[count].value = *(uint64_t *)(((char *)rxvq)
> +				+ rte_vhost_rxq_stat_strings[t].offset);
> +			count++;
> +		}
> +	}
> +
> +	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> +		struct vhost_queue *txvq =
> +			(struct vhost_queue *)dev->data->tx_queues[i];
> +
> +		if (!txvq)
> +			continue;
> +
> +		for (t = 0; t < VHOST_NB_TXQ_XSTATS; t++) {
> +			xstats[count].value = *(uint64_t *)(((char *)txvq)
> +				+ rte_vhost_txq_stat_strings[t].offset);
> +			count++;
> +		}
> +	}
> +
> +	return count;
> +}
> +
> +static void
> +vhost_update_packet_xstats(struct vhost_queue *vq,
> +			   struct rte_mbuf **bufs,
> +			   uint16_t nb_rxtx,
> +			   uint16_t nb_bufs,
> +			   enum rte_vhostqueue_rxtx vqueue_rxtx)
> +{
> +	uint32_t pkt_len = 0;
> +	uint64_t i = 0;
> +	uint64_t index;
> +	struct ether_addr *ea = NULL;
> +	struct vhost_xstats *xstats_update = &vq->xstats;
> +
> +	for (i = 0; i < nb_rxtx ; i++) {
> +		pkt_len = bufs[i]->pkt_len;
> +		if (pkt_len == 64) {
> +			xstats_update->stat[1]++;
> +

Unnecessary blank line.

> +		} else if (pkt_len > 64 && pkt_len < 1024) {
> +			index = (sizeof(pkt_len) * 8)
> +				- __builtin_clz(pkt_len) - 5;
> +			xstats_update->stat[index]++;
> +		} else {
> +			if (pkt_len < 64)
> +				xstats_update->stat[0]++;
> +			else if (pkt_len <= 1522)
> +				xstats_update->stat[6]++;
> +			else if (pkt_len > 1522)
> +				xstats_update->stat[7]++;
> +		}
> +
> +		ea = rte_pktmbuf_mtod(bufs[i], struct ether_addr *);
> +		if (is_multicast_ether_addr(ea)) {
> +			if (is_broadcast_ether_addr(ea))
> +				/* broadcast++; */
> +				xstats_update->stat[8]++;
> +			else
> +				/* multicast++; */
> +				xstats_update->stat[9]++;

The comments could be avoided if you defined fields in the vhost_stats
struct named "broadcast" and "multicast". I don't object to the way
Harry proposed, though: using an enum to index the stat array.

> +		}
> +	}
> +	/* non-multi/broadcast, multi/broadcast, including those
> +	 * that were discarded or not sent.

Hmmm, I don't follow it. You may want to reword it.

> +	 * from rfc2863

Which section and which page?

	--yliu