DPDK patches and discussions
From: Jerin Jacob <jerin.jacob@caviumnetworks.com>
To: Nikhil Rao <nikhil.rao@intel.com>
Cc: ferruh.yigit@intel.com, lei.a.yao@intel.com, dev@dpdk.org,
	stable@dpdk.org
Subject: Re: [dpdk-dev] [PATCH v2] eventdev: make ethernet port identifiers 16 bit
Date: Thu, 10 May 2018 11:05:40 +0530	[thread overview]
Message-ID: <20180510053539.GA14890@jerin> (raw)
In-Reply-To: <1525899160-169278-1-git-send-email-nikhil.rao@intel.com>

-----Original Message-----
> Date: Thu, 10 May 2018 02:22:40 +0530
> From: Nikhil Rao <nikhil.rao@intel.com>
> To: jerin.jacob@caviumnetworks.com
> CC: ferruh.yigit@intel.com, lei.a.yao@intel.com, dev@dpdk.org, Nikhil Rao
>  <nikhil.rao@intel.com>, stable@dpdk.org
> Subject: [PATCH v2] eventdev: make ethernet port identifiers 16 bit
> X-Mailer: git-send-email 1.8.3.1
> 
> Ethernet port ID data size has been extended to 16 bits since release 17.11.
> Update the Rx event adapter interface and implementation accordingly.
> 
> Fixes: 9c38b704d280 ("eventdev: add eth Rx adapter implementation")
> Signed-off-by: Nikhil Rao <nikhil.rao@intel.com>
> Cc: stable@dpdk.org
> --

Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>

I will squash the following patch when applying:
https://dpdk.org/ml/archives/dev/2018-May/100988.html

> 
> Supersedes the following posts:
> http://dpdk.org/ml/archives/dev/2018-May/100917.html
> http://dpdk.org/ml/archives/dev/2018-May/100426.html
> ---
>  lib/librte_eventdev/rte_event_eth_rx_adapter.h |  4 ++--
>  lib/librte_eventdev/rte_event_eth_rx_adapter.c | 23 ++++++++++++-----------
>  2 files changed, 14 insertions(+), 13 deletions(-)
> 
> diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.h b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
> index e6a6435..834eb53 100644
> --- a/lib/librte_eventdev/rte_event_eth_rx_adapter.h
> +++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
> @@ -307,7 +307,7 @@ int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
>   *  combination of the two error codes.
>   */
>  int rte_event_eth_rx_adapter_queue_add(uint8_t id,
> -			uint8_t eth_dev_id,
> +			uint16_t eth_dev_id,
>  			int32_t rx_queue_id,
>  			const struct rte_event_eth_rx_adapter_queue_conf *conf);
>  
> @@ -335,7 +335,7 @@ int rte_event_eth_rx_adapter_queue_add(uint8_t id,
>   *  - 0: Success, Receive queue deleted correctly.
>   *  - <0: Error code on failure.
>   */
> -int rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
> +int rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
>  				       int32_t rx_queue_id);
>  
>  /**
> diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
> index 4c0c025..6f70509 100644
> --- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c
> +++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
> @@ -31,7 +31,7 @@
>   */
>  struct eth_rx_poll_entry {
>  	/* Eth port to poll */
> -	uint8_t eth_dev_id;
> +	uint16_t eth_dev_id;
>  	/* Eth rx queue to poll */
>  	uint16_t eth_rx_qid;
>  };
> @@ -168,7 +168,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)
>  
>  	while (1) {
>  		uint16_t q;
> -		uint8_t d;
> +		uint16_t d;
>  
>  		i = (i + 1) % n;
>  		if (i == 0) {
> @@ -190,7 +190,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)
>  static int
>  eth_poll_wrr_calc(struct rte_event_eth_rx_adapter *rx_adapter)
>  {
> -	uint8_t d;
> +	uint16_t d;
>  	uint16_t q;
>  	unsigned int i;
>  
> @@ -510,7 +510,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)
>  	for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
>  		unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
>  		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
> -		uint8_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
> +		uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
>  
>  		/* Don't do a batch dequeue from the rx queue if there isn't
>  		 * enough space in the enqueue buffer.
> @@ -755,7 +755,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)
>  }
>  
>  static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,
> -		uint8_t eth_dev_id,
> +		uint16_t eth_dev_id,
>  		int rx_queue_id,
>  		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
>  {
> @@ -859,7 +859,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  	struct rte_event_eth_rx_adapter *rx_adapter;
>  	int ret;
>  	int socket_id;
> -	uint8_t i;
> +	uint16_t i;
>  	char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
>  	const uint8_t default_rss_key[] = {
>  		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
> @@ -978,7 +978,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  
>  int
>  rte_event_eth_rx_adapter_queue_add(uint8_t id,
> -		uint8_t eth_dev_id,
> +		uint16_t eth_dev_id,
>  		int32_t rx_queue_id,
>  		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
>  {
> @@ -1002,7 +1002,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  						&cap);
>  	if (ret) {
>  		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
> -			"eth port %" PRIu8, id, eth_dev_id);
> +			"eth port %" PRIu16, id, eth_dev_id);
>  		return ret;
>  	}
>  
> @@ -1010,7 +1010,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  		&& (queue_conf->rx_queue_flags &
>  			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
>  		RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
> -				" eth port: %" PRIu8 " adapter id: %" PRIu8,
> +				" eth port: %" PRIu16 " adapter id: %" PRIu8,
>  				eth_dev_id, id);
>  		return -EINVAL;
>  	}
> @@ -1018,7 +1018,8 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
>  		(rx_queue_id != -1)) {
>  		RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
> -			"event queue id %u eth port %u", id, eth_dev_id);
> +			"event queue, eth port: %" PRIu16 " adapter id: %"
> +			PRIu8, eth_dev_id, id);
>  		return -EINVAL;
>  	}
>  
> @@ -1075,7 +1076,7 @@ static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  }
>  
>  int
> -rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
> +rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
>  				int32_t rx_queue_id)
>  {
>  	int ret = 0;
> -- 
> 1.8.3.1
> 
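For reference, a minimal usage sketch of the two calls whose prototypes change
above, with eth_dev_id now a 16-bit port id. The adapter id, event queue id and
scheduling fields below are hypothetical illustration values, not taken from
the patch; only the two function signatures come from the header hunk above.

#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>

/* Hypothetical adapter id for illustration; the adapter id stays
 * uint8_t, while the ethernet port id is now uint16_t.
 */
#define RX_ADAPTER_ID	0

static int
connect_port_to_adapter(uint16_t eth_dev_id)
{
	struct rte_event_eth_rx_adapter_queue_conf qconf = {
		.ev = {
			.queue_id = 0,
			.sched_type = RTE_SCHED_TYPE_ATOMIC,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		},
	};
	int ret;

	/* rx_queue_id == -1 connects all Rx queues of the port; a
	 * specific queue id additionally needs the MULTI_EVENTQ
	 * capability, per the check reworded in this patch.
	 */
	ret = rte_event_eth_rx_adapter_queue_add(RX_ADAPTER_ID,
						 eth_dev_id, -1, &qconf);
	if (ret < 0)
		return ret;

	return rte_event_eth_rx_adapter_queue_del(RX_ADAPTER_ID,
						  eth_dev_id, -1);
}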

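The widened 'd' in the eth_poll_wrr_calc() hunks sits inside the classic
interleaved weighted round-robin computation visible in the surrounding
context (the "i = (i + 1) % n" loop), where the gcd of all servicing weights
(cf. gcd_u16() above) lowers the current-weight threshold after each full
pass over the queues. A self-contained sketch of that selection step follows;
the names and signature here are illustrative, not the adapter's own:

#include <stdint.h>

/* Interleaved weighted round-robin: pick the next index to serve.
 * 'weights' are the per-queue service weights, 'gcd' the gcd of all
 * weights, 'max_w' the largest weight. 'i' and 'cw' persist between
 * calls; initialise *i to n - 1 and *cw to 0 before the first call.
 * Returns -1 if every weight is zero.
 */
static int
wrr_next(const uint16_t *weights, uint16_t n, uint16_t gcd,
	 uint16_t max_w, uint16_t *i, int *cw)
{
	for (;;) {
		*i = (*i + 1) % n;
		if (*i == 0) {
			*cw -= gcd;	/* one full pass done: lower the bar */
			if (*cw <= 0) {
				*cw = max_w;
				if (*cw == 0)
					return -1;
			}
		}
		if (weights[*i] >= *cw)
			return *i;	/* serve this queue in this slot */
	}
}
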
Thread overview: 12+ messages
2018-05-09 19:17 [dpdk-dev] [PATCH] " Nikhil Rao
2018-05-09 20:52 ` [dpdk-dev] [PATCH v2] " Nikhil Rao
2018-05-10  4:31   ` Jerin Jacob
2018-05-10 13:48     ` [dpdk-dev] [dpdk-stable] " Thomas Monjalon
2018-05-10 14:30       ` Jerin Jacob
2018-05-10 14:54         ` Thomas Monjalon
2018-05-10 15:16           ` Jerin Jacob
2018-05-10 15:45             ` Thomas Monjalon
2018-05-10 16:11               ` Thomas Monjalon
2018-05-10  5:35   ` Jerin Jacob [this message]
2018-05-10  5:49     ` [dpdk-dev] " Jerin Jacob
2018-05-10  3:16 ` [dpdk-dev] [PATCH] " Jerin Jacob
