DPDK patches and discussions
 help / color / mirror / Atom feed
From: Ferruh Yigit <ferruh.yigit@intel.com>
To: John Miller <john.miller@atomicrules.com>, <dev@dpdk.org>
Subject: Re: [PATCH 1/4] net/ark: add device capabilities record
Date: Wed, 26 Jan 2022 16:45:48 +0000	[thread overview]
Message-ID: <770f9da5-fa68-8c47-5794-e5d99548a4c2@intel.com> (raw)
In-Reply-To: <20220119191255.273988-1-john.miller@atomicrules.com>

On 1/19/2022 7:12 PM, John Miller wrote:
> Add static record of supported device capabilities.
> 
> Signed-off-by: John Miller <john.miller@atomicrules.com>
> ---
>   drivers/net/ark/ark_ethdev.c | 58 +++++++++++++++++++++++++++++-------
>   1 file changed, 48 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
> index b618cba3f0..0414c78bb5 100644
> --- a/drivers/net/ark/ark_ethdev.c
> +++ b/drivers/net/ark/ark_ethdev.c
> @@ -96,6 +96,26 @@ static const struct rte_pci_id pci_id_ark_map[] = {
>   	{.vendor_id = 0, /* sentinel */ },
>   };
>   
> +struct ark_caps {
> +	bool rqpacing;

Can you please add a comment explaining what this 'rqpacing' capability is?
Either in the commit log, as a comment in the code, or both.

> +};
> +struct ark_dev_caps {
> +	uint32_t  device_id;
> +	struct ark_caps  caps;
> +};
> +static const struct ark_dev_caps
> +ark_device_caps[] = {
> +		     {0x100d, {.rqpacing = true} },
> +		     {0x100e, {.rqpacing = true} },
> +		     {0x100f, {.rqpacing = true} },
> +		     {0x1010, {.rqpacing = false} },
> +		     {0x1017, {.rqpacing = true} },
> +		     {0x1018, {.rqpacing = true} },
> +		     {0x1019, {.rqpacing = true} },
> +		     {0x101e, {.rqpacing = false} },

This device, 0x101e, is not even probed, so it looks odd to keep a capability entry for it.

Also, using macros for the device/vendor IDs would improve readability.

> +		     {.device_id = 0,}
> +};
> +
>   static int
>   eth_ark_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
>   		struct rte_pci_device *pci_dev)
> @@ -256,6 +276,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
>   	int ret;
>   	int port_count = 1;
>   	int p;
> +	bool rqpacing = false;
>   
>   	ark->eth_dev = dev;
>   
> @@ -270,6 +291,15 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
>   	rte_eth_copy_pci_info(dev, pci_dev);
>   	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
>   
> +	p = 0;
> +	while (ark_device_caps[p].device_id != 0) {
> +		if (pci_dev->id.device_id == ark_device_caps[p].device_id) {
> +			rqpacing = ark_device_caps[p].caps.rqpacing;
> +			break;
> +		}
> +		p++;
> +	}
> +
>   	/* Use dummy function until setup */
>   	dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
>   	dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;
> @@ -288,8 +318,12 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
>   	ark->pktgen.v  = (void *)&ark->bar0[ARK_PKTGEN_BASE];
>   	ark->pktchkr.v  = (void *)&ark->bar0[ARK_PKTCHKR_BASE];
>   
> -	ark->rqpacing =
> -		(struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE);
> +	if (rqpacing) {
> +		ark->rqpacing =
> +			(struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE);
> +	} else {
> +		ark->rqpacing = NULL;
> +	}
>   	ark->started = 0;
>   	ark->pkt_dir_v = ARK_PKT_DIR_INIT_VAL;
>   
> @@ -309,13 +343,15 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
>   		return -1;
>   	}
>   	if (ark->sysctrl.t32[3] != 0) {
> -		if (ark_rqp_lasped(ark->rqpacing)) {
> -			ARK_PMD_LOG(ERR, "Arkville Evaluation System - "
> -				    "Timer has Expired\n");
> -			return -1;
> +		if (ark->rqpacing) {
> +			if (ark_rqp_lasped(ark->rqpacing)) {
> +				ARK_PMD_LOG(ERR, "Arkville Evaluation System - "
> +					    "Timer has Expired\n");
> +				return -1;
> +			}
> +			ARK_PMD_LOG(WARNING, "Arkville Evaluation System - "
> +				    "Timer is Running\n");
>   		}
> -		ARK_PMD_LOG(WARNING, "Arkville Evaluation System - "
> -			    "Timer is Running\n");
>   	}
>   
>   	ARK_PMD_LOG(DEBUG,
> @@ -499,7 +535,8 @@ ark_config_device(struct rte_eth_dev *dev)
>   	ark_ddm_stats_reset(ark->ddm.v);
>   
>   	ark_ddm_stop(ark->ddm.v, 0);
> -	ark_rqp_stats_reset(ark->rqpacing);
> +	if (ark->rqpacing)
> +		ark_rqp_stats_reset(ark->rqpacing);
>   
>   	return 0;
>   }
> @@ -695,7 +732,8 @@ eth_ark_dev_close(struct rte_eth_dev *dev)
>   	/*
>   	 * TODO This should only be called once for the device during shutdown
>   	 */
> -	ark_rqp_dump(ark->rqpacing);
> +	if (ark->rqpacing)
> +		ark_rqp_dump(ark->rqpacing);
>   
>   	for (i = 0; i < dev->data->nb_tx_queues; i++) {
>   		eth_ark_tx_queue_release(dev->data->tx_queues[i]);


      parent reply	other threads:[~2022-01-26 16:46 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-01-19 19:12 John Miller
2022-01-19 19:12 ` [PATCH 2/4] net/ark: support arbitrary mbuf size John Miller
2022-01-26 16:46   ` Ferruh Yigit
2022-01-19 19:12 ` [PATCH 3/4] net/ark: publish include file for external access John Miller
2022-01-26 16:48   ` Ferruh Yigit
2022-01-26 16:49     ` Ferruh Yigit
2022-01-19 19:12 ` [PATCH 4/4] net/ark: support chunk DMA transfers John Miller
2022-01-26 16:52   ` Ferruh Yigit
2022-01-26 16:45 ` Ferruh Yigit [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=770f9da5-fa68-8c47-5794-e5d99548a4c2@intel.com \
    --to=ferruh.yigit@intel.com \
    --cc=dev@dpdk.org \
    --cc=john.miller@atomicrules.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).