DPDK patches and discussions
From: "Xie, Huawei" <huawei.xie@intel.com>
To: "Xie, Huawei" <huawei.xie@intel.com>, "dev@dpdk.org" <dev@dpdk.org>
Subject: Re: [dpdk-dev] [PATCH] examples/vmdq: support i40e in vmdq example
Date: Wed, 24 Sep 2014 11:01:41 +0000
Message-ID: <C37D651A908B024F974696C65296B57B0F2A7828@SHSMSX101.ccr.corp.intel.com>
In-Reply-To: <C37D651A908B024F974696C65296B57B0F2A780C@SHSMSX101.ccr.corp.intel.com>


> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Xie, Huawei
> Sent: Wednesday, September 24, 2014 6:58 PM
> To: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] examples/vmdq: support i40e in vmdq example
> 
> This patch depends on "[dpdk-dev] [PATCH 0/6] i40e VMDQ support"
> 
> > -----Original Message-----
> > From: Xie, Huawei
> > Sent: Wednesday, September 24, 2014 6:54 PM
> > To: dev@dpdk.org
> > Cc: Xie, Huawei
> > Subject: [PATCH] examples/vmdq: support i40e in vmdq example
> >
> > This patch supports i40e in the vmdq example.
> > 1. The rx queue index is offset by the vmdq queue base in rte_eth_rx_burst.
> > 2. The pool index is offset by the vmdq pool base when MAC addresses are added to pools.
> > 3. Some error message prints are added.
> > Besides, due to some limitations in the PMD:
> > 1. MAC addresses need to be pre-allocated to the VMDQ pools.
> > 2. Ports are started before MAC allocation.
> >
> > Signed-off-by: Huawei Xie <huawei.xie@intel.com>
> > Acked-by: Chen Jing D(Mark) <jing.d.chen@intel.com>
> > Acked-by: Jijiang Liu <jijiang.liu@intel.com>
> > Acked-by: Changchun Ouyang <changchun.ouyang.intel.com>
Sorry, there is a typo above; the address should be changchun.ouyang@intel.com.
> > ---
> >  examples/vmdq/main.c | 162 ++++++++++++++++++++++++++++++---------------------
> >  1 file changed, 97 insertions(+), 65 deletions(-)
> >
> > diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
> > index 35df234..a7ffdef 100644
> > --- a/examples/vmdq/main.c
> > +++ b/examples/vmdq/main.c
> > @@ -194,6 +194,13 @@ const uint16_t vlan_tags[] = {
> >  	48, 49, 50, 51, 52, 53, 54, 55,
> >  	56, 57, 58, 59, 60, 61, 62, 63,
> >  };
> > +const uint16_t num_vlans = RTE_DIM(vlan_tags);
> > +static uint16_t num_pf_queues, num_vmdq_queues;
> > +static uint16_t vmdq_pool_base, vmdq_queue_base;
> > +/* pool mac addr template, pool mac addr is like: 52 54 00 12 port# pool# */
> > +static struct ether_addr pool_addr_template = {
> > +	.addr_bytes = {0x52, 0x54, 0x00, 0x12, 0x00, 0x00}
> > +};
> >
> >  /* ethernet addresses of ports */
> >  static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
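
To make the template above concrete: bytes 4 and 5 are overwritten with the
port and pool indices later in port_init(). A hypothetical instance:

    /* pool_addr_template = 52:54:00:12:00:00                   */
    /* for port 0, pool 2: addr_bytes[4] = 0, addr_bytes[5] = 2 */
    /* => resulting pool MAC = 52:54:00:12:00:02                */
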
> > @@ -213,22 +220,9 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
> >  	unsigned i;
> >
> >  	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_pools;
> > +	conf.nb_pool_maps = num_pools;
> >  	conf.enable_default_pool = 0;
> >  	conf.default_pool = 0; /* set explicit value, even if not used */
> > -	switch (num_pools) {
> > -	/* For 10G NIC like 82599, 128 is valid for queue number */
> > -	case MAX_POOL_NUM_10G:
> > -		num_queues = MAX_QUEUE_NUM_10G;
> > -		conf.nb_pool_maps = MAX_POOL_MAP_NUM_10G;
> > -		break;
> > -	/* For 1G NIC like i350, 82580 and 82576, 8 is valid for queue number */
> > -	case MAX_POOL_NUM_1G:
> > -		num_queues = MAX_QUEUE_NUM_1G;
> > -		conf.nb_pool_maps = MAX_POOL_MAP_NUM_1G;
> > -		break;
> > -	default:
> > -		return -1;
> > -	}
> >
> >  	for (i = 0; i < conf.nb_pool_maps; i++){
> >  		conf.pool_map[i].vlan_id = vlan_tags[ i ];
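
As a reading aid for the loop above: nb_pool_maps now equals the configured
pool count, so each VMDQ pool is paired with exactly one VLAN tag. A minimal
sketch of the mapping this produces (the pool count is hypothetical):

    /* With num_pools = 8, the loop yields:                   */
    /*   pool_map[0].vlan_id = vlan_tags[0]  -> pool 0        */
    /*   pool_map[1].vlan_id = vlan_tags[1]  -> pool 1        */
    /*   ...                                                  */
    /*   pool_map[7].vlan_id = vlan_tags[7]  -> pool 7        */
    /* i.e. traffic tagged vlan_tags[i] is steered to pool i. */
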
> > @@ -242,40 +236,6 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
> >  }
> >
> >  /*
> > - * Validate the pool number accrording to the max pool number gotten form dev_info
> > - * If the pool number is invalid, give the error message and return -1
> > - */
> > -static inline int
> > -validate_num_pools(uint32_t max_nb_pools)
> > -{
> > -	if (num_pools > max_nb_pools) {
> > -		printf("invalid number of pools\n");
> > -		return -1;
> > -	}
> > -
> > -	switch (max_nb_pools) {
> > -	/* For 10G NIC like 82599, 64 is valid for pool number */
> > -	case MAX_POOL_NUM_10G:
> > -		if (num_pools != MAX_POOL_NUM_10G) {
> > -			printf("invalid number of pools\n");
> > -			return -1;
> > -		}
> > -		break;
> > -	/* For 1G NIC like i350, 82580 and 82576, 8 is valid for pool number */
> > -	case MAX_POOL_NUM_1G:
> > -		if (num_pools != MAX_POOL_NUM_1G) {
> > -			printf("invalid number of pools\n");
> > -			return -1;
> > -		}
> > -		break;
> > -	default:
> > -		return -1;
> > -	}
> > -
> > -	return 0;
> > -}
> > -
> > -/*
> >   * Initialises a given port using global settings and with the rx buffers
> >   * coming from the mbuf_pool passed as parameter
> >   */
> > @@ -284,26 +244,55 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
> >  {
> >  	struct rte_eth_dev_info dev_info;
> >  	struct rte_eth_conf port_conf;
> > -	uint16_t rxRings, txRings = (uint16_t)rte_lcore_count();
> > +	uint16_t rxRings, txRings;
> >  	const uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT, txRingSize = RTE_TEST_TX_DESC_DEFAULT;
> >  	int retval;
> >  	uint16_t q;
> > +	uint16_t queues_per_pool;
> >  	uint32_t max_nb_pools;
> >
> >  	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
> >  	rte_eth_dev_info_get (port, &dev_info);
> >  	max_nb_pools = (uint32_t)dev_info.max_vmdq_pools;
> > -	retval = validate_num_pools(max_nb_pools);
> > -	if (retval < 0)
> > -		return retval;
> > +	if (num_pools != max_nb_pools) {
> > +		printf("num_pools %d != max_nb_pools %d! Currently we only "
> > +			"support configuring all vmdq pools\n",
> > +			num_pools, max_nb_pools);
> > +		return -1;
> > +	}
> >
> >  	retval = get_eth_conf(&port_conf, num_pools);
> >  	if (retval < 0)
> >  		return retval;
> >
> > +	/*
> > +	 * NIC queues are divided into pf queues and vmdq queues.
> > +	 */
> > +	/* There is assumption here all ports have the same configuration */
> > +	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
> > +	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
> > +	num_vmdq_queues = num_pools * queues_per_pool;
> > +	num_queues = num_pf_queues + num_vmdq_queues;
> > +	vmdq_queue_base = dev_info.vmdq_queue_base;
> > +	vmdq_pool_base  = dev_info.vmdq_pool_base;
> > +
> > +	printf("pf queue num: %u, configured vmdq pool num: %u,"
> > +		" each vmdq pool has %u queues\n",
> > +		num_pf_queues, num_pools, queues_per_pool);
> > +	printf("vmdq queue base: %d pool base %d\n",
> > +		vmdq_queue_base, vmdq_pool_base);
> >  	if (port >= rte_eth_dev_count()) return -1;
> >
> > -	rxRings = (uint16_t)num_queues,
> > +	/*
> > +	 * Though in this example we only receive packets from the first queue
> > +	 * of each pool and send packets through the first rte_lcore_count() tx
> > +	 * queues of the vmdq queues, all queues including pf queues are set up.
> > +	 * This is because vmdq queues don't always start from zero, and the
> > +	 * PMD layer doesn't support selectively initialising only part of the
> > +	 * rx/tx queues.
> > +	 */
> > +	rxRings = (uint16_t)dev_info.max_rx_queues;
> > +	txRings = (uint16_t)dev_info.max_tx_queues;
> >  	retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
> >  	if (retval != 0)
> >  		return retval;
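
To make the base/offset arithmetic above concrete, a worked example with
hypothetical dev_info values (the real numbers vary per NIC and firmware
configuration):

    /* Suppose rte_eth_dev_info_get() reported:                          */
    /*   max_rx_queues = 320, vmdq_queue_num = 256,                      */
    /*   max_vmdq_pools = 64, vmdq_queue_base = 64                       */
    /* Then, with num_pools = 64:                                        */
    /*   num_pf_queues   = 320 - 256 = 64                                */
    /*   queues_per_pool = 256 / 64  = 4                                 */
    /*   num_vmdq_queues = 64 * 4    = 256                               */
    /* Pool p's first rx queue is vmdq_queue_base + p * queues_per_pool, */
    /* so pool 0's first queue is queue 64, not queue 0; hence all       */
    /* max_rx_queues/max_tx_queues queues are configured below.          */
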
> > @@ -312,20 +301,26 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
> >  		retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
> >  						rte_eth_dev_socket_id(port), &rx_conf_default,
> >  						mbuf_pool);
> > -		if (retval < 0)
> > +		if (retval < 0) {
> > +			printf("initialise rx queue %d failed\n", q);
> >  			return retval;
> > +		}
> >  	}
> >
> >  	for (q = 0; q < txRings; q ++) {
> >  		retval = rte_eth_tx_queue_setup(port, q, txRingSize,
> >  						rte_eth_dev_socket_id(port), &tx_conf_default);
> > -		if (retval < 0)
> > +		if (retval < 0) {
> > +			printf("initialise tx queue %d failed\n", q);
> >  			return retval;
> > +		}
> >  	}
> >
> >  	retval  = rte_eth_dev_start(port);
> > -	if (retval < 0)
> > +	if (retval < 0) {
> > +		printf("port %d start failed\n", port);
> >  		return retval;
> > +	}
> >
> >  	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
> >  	printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
> > @@ -338,6 +333,25 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
> >  			vmdq_ports_eth_addr[port].addr_bytes[4],
> >  			vmdq_ports_eth_addr[port].addr_bytes[5]);
> >
> > +	/* Set mac for each pool */
> > +	for (q = 0; q < num_pools; q++) {
> > +		struct ether_addr mac;
> > +		mac = pool_addr_template;
> > +		mac.addr_bytes[4] = port;
> > +		mac.addr_bytes[5] = q;
> > +		printf("Port %u vmdq pool %u set mac %02x:%02x:%02x:%02x:%02x:%02x\n",
> > +			port, q,
> > +			mac.addr_bytes[0], mac.addr_bytes[1],
> > +			mac.addr_bytes[2], mac.addr_bytes[3],
> > +			mac.addr_bytes[4], mac.addr_bytes[5]);
> > +		retval = rte_eth_dev_mac_addr_add(port, &mac,
> > +				q + vmdq_pool_base);
> > +		if (retval) {
> > +			printf("mac addr add failed at pool %d\n", q);
> > +			return retval;
> > +		}
> > +	}
> > +
> >  	return 0;
> >  }
> >
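
The "q + vmdq_pool_base" argument above matters because on i40e the VMDQ
pools don't begin at absolute pool 0 (the PF occupies the leading pool), so
rte_eth_dev_mac_addr_add() must be given the absolute pool index. A
hypothetical usage sketch:

    /* with vmdq_pool_base = 1, the MAC for logical pool q = 5    */
    /* is registered on absolute pool 6:                          */
    rte_eth_dev_mac_addr_add(port, &mac, 5 + vmdq_pool_base);
    /* on NICs whose pools start at zero, the offset is simply 0  */
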
> > @@ -353,6 +367,11 @@ vmdq_parse_num_pools(const char *q_arg)
> >  	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
> >  		return -1;
> >
> > +	if (n > num_vlans) {
> > +		printf("num_pools %d > num_vlans %d\n", n, num_vlans);
> > +		return -1;
> > +	}
> > +
> >  	num_pools = n;
> >
> >  	return 0;
> > @@ -481,7 +500,7 @@ lcore_main(__attribute__((__unused__)) void* dummy)
> >  	uint16_t core_id = 0;
> >  	uint16_t startQueue, endQueue;
> >  	uint16_t q, i, p;
> > -	const uint16_t remainder = (uint16_t)(num_queues % num_cores);
> > +	const uint16_t remainder = (uint16_t)(num_vmdq_queues % num_cores);
> >
> >  	for (i = 0; i < num_cores; i ++)
> >  		if (lcore_ids[i] == lcore_id) {
> > @@ -491,17 +510,27 @@ lcore_main(__attribute__((__unused__)) void* dummy)
> >
> >  	if (remainder != 0) {
> >  		if (core_id < remainder) {
> > -			startQueue = (uint16_t)(core_id * (num_queues/num_cores + 1));
> > -			endQueue = (uint16_t)(startQueue + (num_queues/num_cores) + 1);
> > +			startQueue = (uint16_t)(core_id *
> > +					(num_vmdq_queues / num_cores + 1));
> > +			endQueue = (uint16_t)(startQueue +
> > +					(num_vmdq_queues / num_cores) + 1);
> >  		} else {
> > -			startQueue = (uint16_t)(core_id * (num_queues/num_cores) + remainder);
> > -			endQueue = (uint16_t)(startQueue + (num_queues/num_cores));
> > +			startQueue = (uint16_t)(core_id *
> > +					(num_vmdq_queues / num_cores) +
> > +					remainder);
> > +			endQueue = (uint16_t)(startQueue +
> > +					(num_vmdq_queues / num_cores));
> >  		}
> >  	} else {
> > -		startQueue = (uint16_t)(core_id * (num_queues/num_cores));
> > -		endQueue = (uint16_t)(startQueue + (num_queues/num_cores));
> > +		startQueue = (uint16_t)(core_id *
> > +				(num_vmdq_queues / num_cores));
> > +		endQueue = (uint16_t)(startQueue +
> > +				(num_vmdq_queues / num_cores));
> >  	}
> >
> > +	/* vmdq queue idx doesn't always start from zero. */
> > +	startQueue += vmdq_queue_base;
> > +	endQueue   += vmdq_queue_base;
> >  	printf("core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_id,
> >  		(unsigned)lcore_id, startQueue, endQueue - 1);
> >
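
A worked example of the queue split above, with hypothetical counts:
num_vmdq_queues = 10 and num_cores = 3 give remainder = 1, so:

    /* core 0 (< remainder): start = 0*(10/3+1) = 0, end = 0+(10/3)+1 = 4  -> queues 0-3 */
    /* core 1:               start = 1*(10/3)+1 = 4, end = 4+(10/3)   = 7  -> queues 4-6 */
    /* core 2:               start = 2*(10/3)+1 = 7, end = 7+(10/3)   = 10 -> queues 7-9 */
    /* each range is then shifted by vmdq_queue_base before polling        */
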
> > @@ -533,8 +562,11 @@ lcore_main(__attribute__((__unused__)) void* dummy)
> >  				for (i = 0; i < rxCount; i++)
> >  					update_mac_address(buf[i], dport);
> >
> > -				const uint16_t txCount = rte_eth_tx_burst(dport,
> > -					core_id, buf, rxCount);
> > +				const uint16_t txCount = rte_eth_tx_burst(
> > +					dport,
> > +					vmdq_queue_base + core_id,
> > +					buf,
> > +					rxCount);
> >
> >  				if (txCount != rxCount) {
> >  					for (i = txCount; i < rxCount; i++)
> > --
> > 1.8.1.4
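
For completeness, a minimal sketch of the per-lcore forwarding path with both
base offsets in place. It loosely follows lcore_main() above: sport/dport
stand for the source and destination ports and MAX_PKT_BURST for the
example's burst size; buffer sizing and error handling are omitted:

    struct rte_mbuf *buf[MAX_PKT_BURST];
    uint16_t q, i;

    for (q = startQueue; q < endQueue; q++) {
        /* startQueue/endQueue already include vmdq_queue_base */
        const uint16_t rxCount = rte_eth_rx_burst(sport, q, buf,
                MAX_PKT_BURST);
        if (rxCount == 0)
            continue;
        for (i = 0; i < rxCount; i++)
            update_mac_address(buf[i], dport);
        /* the tx queue index is offset by the same vmdq queue base */
        const uint16_t txCount = rte_eth_tx_burst(dport,
                vmdq_queue_base + core_id, buf, rxCount);
        /* free whatever could not be transmitted */
        for (i = txCount; i < rxCount; i++)
            rte_pktmbuf_free(buf[i]);
    }
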
