* [PATCH 01/17] bus/fslmc: update MC to 10.29
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 02/17] bus/fslmc: use dmb oshst for synchronization before I/O nipun.gupta
` (17 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Nipun Gupta, Gagandeep Singh
From: Nipun Gupta <nipun.gupta@nxp.com>
update MC firmware library version to 10.29
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/bus/fslmc/mc/fsl_dpmng.h | 2 +-
drivers/net/dpaa2/mc/dpdmux.c | 8 ++
drivers/net/dpaa2/mc/dpkg.c | 7 +-
drivers/net/dpaa2/mc/dpni.c | 111 ++++++++++++++++++++------
drivers/net/dpaa2/mc/fsl_dpdmux.h | 3 +
drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h | 5 +-
drivers/net/dpaa2/mc/fsl_dpni.h | 54 ++++++++++---
drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 57 +++++++------
8 files changed, 181 insertions(+), 66 deletions(-)
diff --git a/drivers/bus/fslmc/mc/fsl_dpmng.h b/drivers/bus/fslmc/mc/fsl_dpmng.h
index 7e9bd96429..073d47efbf 100644
--- a/drivers/bus/fslmc/mc/fsl_dpmng.h
+++ b/drivers/bus/fslmc/mc/fsl_dpmng.h
@@ -20,7 +20,7 @@ struct fsl_mc_io;
* Management Complex firmware version information
*/
#define MC_VER_MAJOR 10
-#define MC_VER_MINOR 28
+#define MC_VER_MINOR 29
/**
* struct mc_version
diff --git a/drivers/net/dpaa2/mc/dpdmux.c b/drivers/net/dpaa2/mc/dpdmux.c
index edbb01b45b..1bb153cad7 100644
--- a/drivers/net/dpaa2/mc/dpdmux.c
+++ b/drivers/net/dpaa2/mc/dpdmux.c
@@ -398,6 +398,9 @@ int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
attr->mem_size = le16_to_cpu(rsp_params->mem_size);
attr->default_if = le16_to_cpu(rsp_params->default_if);
+ attr->max_dmat_entries = le16_to_cpu(rsp_params->max_dmat_entries);
+ attr->max_mc_groups = le16_to_cpu(rsp_params->max_mc_groups);
+ attr->max_vlan_ids = le16_to_cpu(rsp_params->max_vlan_ids);
return 0;
}
@@ -470,6 +473,11 @@ int dpdmux_if_disable(struct fsl_mc_io *mc_io,
* will be updated with the minimum value of the mfls of the connected
* dpnis and the actual value of dmux mfl.
*
+ * If dpdmux object is created using DPDMUX_OPT_AUTO_MAX_FRAME_LEN and maximum
+ * frame length is changed for a dpni connected to dpdmux interface the change
+ * is propagated through dpdmux interfaces and will overwrite the value set using
+ * this API.
+ *
* Return: '0' on Success; Error code otherwise.
*/
int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
diff --git a/drivers/net/dpaa2/mc/dpkg.c b/drivers/net/dpaa2/mc/dpkg.c
index 1e171eedc7..4789976b7d 100644
--- a/drivers/net/dpaa2/mc/dpkg.c
+++ b/drivers/net/dpaa2/mc/dpkg.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
- * Copyright 2017 NXP
+ * Copyright 2017-2021 NXP
*
*/
#include <fsl_mc_sys.h>
@@ -63,10 +63,7 @@ dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
dpkg_set_field(extr->extract_type, EXTRACT_TYPE,
cfg->extracts[i].type);
- if (extr->num_of_byte_masks > DPKG_NUM_OF_MASKS)
- return -EINVAL;
-
- for (j = 0; j < extr->num_of_byte_masks; j++) {
+ for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
extr->masks[j].offset =
cfg->extracts[i].masks[j].offset;
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index 60048d6c43..cf78295d90 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -128,6 +128,7 @@ int dpni_create(struct fsl_mc_io *mc_io,
cmd_params->num_cgs = cfg->num_cgs;
cmd_params->num_opr = cfg->num_opr;
cmd_params->dist_key_size = cfg->dist_key_size;
+ cmd_params->num_channels = cfg->num_channels;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -203,7 +204,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_io,
cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
cmd_params->num_dpbp = cfg->num_dpbp;
cmd_params->pool_options = cfg->pool_options;
- for (i = 0; i < cmd_params->num_dpbp; i++) {
+ for (i = 0; i < DPNI_MAX_DPBP; i++) {
cmd_params->pool[i].dpbp_id =
cpu_to_le16(cfg->pools[i].dpbp_id);
cmd_params->pool[i].priority_mask =
@@ -592,6 +593,7 @@ int dpni_get_attributes(struct fsl_mc_io *mc_io,
attr->num_tx_tcs = rsp_params->num_tx_tcs;
attr->mac_filter_entries = rsp_params->mac_filter_entries;
attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
+ attr->num_channels = rsp_params->num_channels;
attr->qos_entries = rsp_params->qos_entries;
attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
attr->qos_key_size = rsp_params->qos_key_size;
@@ -815,6 +817,9 @@ int dpni_get_offload(struct fsl_mc_io *mc_io,
* in all enqueue operations
*
* Return: '0' on Success; Error code otherwise.
+ *
+ * If dpni object is created using multiple Tc channels this function will return
+ * qdid value for the first channel
*/
int dpni_get_qdid(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
@@ -958,7 +963,12 @@ int dpni_get_link_state(struct fsl_mc_io *mc_io,
* @token: Token of DPNI object
* @tx_cr_shaper: TX committed rate shaping configuration
* @tx_er_shaper: TX excess rate shaping configuration
- * @coupled: Committed and excess rate shapers are coupled
+ * @param: Special parameters
+ * bit0: Committed and excess rates are coupled
+ * bit1: 1 modify LNI shaper, 0 modify channel shaper
+ * bit8-15: Tx channel to be shaped. Used only if bit1 is set to zero
+ * bits16-26: OAL (Overhead accounting length 11bit value). Used only
+ * when bit1 is set.
*
* Return: '0' on Success; Error code otherwise.
*/
@@ -967,10 +977,13 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
uint16_t token,
const struct dpni_tx_shaping_cfg *tx_cr_shaper,
const struct dpni_tx_shaping_cfg *tx_er_shaper,
- int coupled)
+ uint32_t param)
{
struct dpni_cmd_set_tx_shaping *cmd_params;
struct mc_command cmd = { 0 };
+ int coupled, lni_shaper;
+ uint8_t channel_id;
+ uint16_t oal;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
@@ -985,7 +998,18 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
cpu_to_le32(tx_cr_shaper->rate_limit);
cmd_params->tx_er_rate_limit =
cpu_to_le32(tx_er_shaper->rate_limit);
- dpni_set_field(cmd_params->coupled, COUPLED, coupled);
+
+ coupled = !!(param & 0x01);
+ dpni_set_field(cmd_params->options, COUPLED, coupled);
+
+ lni_shaper = !!((param >> 1) & 0x01);
+ dpni_set_field(cmd_params->options, LNI_SHAPER, lni_shaper);
+
+ channel_id = (param >> 8) & 0xff;
+ cmd_params->channel_id = channel_id;
+
+ oal = (param >> 16) & 0x7FF;
+ cmd_params->oal = cpu_to_le16(oal);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -1543,6 +1567,7 @@ int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
cmd_flags,
token);
cmd_params = (struct dpni_cmd_set_tx_priorities *)cmd.params;
+ cmd_params->channel_idx = cfg->channel_idx;
dpni_set_field(cmd_params->flags,
SEPARATE_GRP,
cfg->separate_groups);
@@ -2053,7 +2078,13 @@ void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg,
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @qtype: Type of queue - only Rx and Tx types are supported
- * @tc_id: Traffic class selection (0-7)
+ * @param: Traffic class and channel ID.
+ * MSB - channel id; used only for DPNI_QUEUE_TX and DPNI_QUEUE_TX_CONFIRM,
+ * ignored for the rest
+ * LSB - traffic class
+ * Use macro DPNI_BUILD_PARAM() to build correct value.
+ * If dpni uses a single channel (uses only channel zero) the parameter can receive
+ * traffic class directly.
* @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled
* with the early-drop configuration by calling dpni_prepare_early_drop()
*
@@ -2066,7 +2097,7 @@ int dpni_set_early_drop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
+ uint16_t param,
uint64_t early_drop_iova)
{
struct dpni_cmd_early_drop *cmd_params;
@@ -2078,7 +2109,8 @@ int dpni_set_early_drop(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_early_drop *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc_id;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
/* send command to mc*/
@@ -2091,7 +2123,13 @@ int dpni_set_early_drop(struct fsl_mc_io *mc_io,
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @qtype: Type of queue - only Rx and Tx types are supported
- * @tc_id: Traffic class selection (0-7)
+ * @param: Traffic class and channel ID.
+ * MSB - channel id; used only for DPNI_QUEUE_TX and DPNI_QUEUE_TX_CONFIRM,
+ * ignored for the rest
+ * LSB - traffic class
+ * Use macro DPNI_BUILD_PARAM() to build correct value.
+ * If dpni uses a single channel (uses only channel zero) the parameter can receive
+ * traffic class directly.
* @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory
*
* warning: After calling this function, call dpni_extract_early_drop() to
@@ -2103,7 +2141,7 @@ int dpni_get_early_drop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
+ uint16_t param,
uint64_t early_drop_iova)
{
struct dpni_cmd_early_drop *cmd_params;
@@ -2115,7 +2153,8 @@ int dpni_get_early_drop(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_early_drop *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc_id;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
/* send command to mc*/
@@ -2138,8 +2177,8 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
- const struct dpni_congestion_notification_cfg *cfg)
+ uint16_t param,
+ const struct dpni_congestion_notification_cfg *cfg)
{
struct dpni_cmd_set_congestion_notification *cmd_params;
struct mc_command cmd = { 0 };
@@ -2151,7 +2190,8 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc_id;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->congestion_point = cfg->cg_point;
cmd_params->cgid = (uint8_t)cfg->cgid;
cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
@@ -2179,7 +2219,8 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
- * @tc_id: Traffic class selection (0-7)
* @param: Traffic class and channel. Bits[0-7] contain traffic class,
* bits[8-15] contain channel id
* @cfg: congestion notification configuration
*
* Return: '0' on Success; error code otherwise.
@@ -2188,8 +2229,8 @@ int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
- struct dpni_congestion_notification_cfg *cfg)
+ uint16_t param,
+ struct dpni_congestion_notification_cfg *cfg)
{
struct dpni_rsp_get_congestion_notification *rsp_params;
struct dpni_cmd_get_congestion_notification *cmd_params;
@@ -2203,7 +2244,8 @@ int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc_id;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->congestion_point = cfg->cg_point;
cmd_params->cgid = cfg->cgid;
@@ -2280,7 +2322,7 @@ int dpni_set_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc,
+ uint16_t param,
uint8_t index,
uint8_t options,
const struct dpni_queue *queue)
@@ -2294,7 +2336,8 @@ int dpni_set_queue(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->index = index;
cmd_params->options = options;
cmd_params->dest_id = cpu_to_le32(queue->destination.id);
@@ -2317,7 +2360,13 @@ int dpni_set_queue(struct fsl_mc_io *mc_io,
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @qtype: Type of queue - all queue types are supported
- * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @param: Traffic class and channel ID.
+ * MSB - channel id; used only for DPNI_QUEUE_TX and DPNI_QUEUE_TX_CONFIRM,
+ * ignored for the rest
+ * LSB - traffic class
+ * Use macro DPNI_BUILD_PARAM() to build correct value.
+ * If dpni uses a single channel (uses only channel zero) the parameter can receive
+ * traffic class directly.
* @index: Selects the specific queue out of the set allocated for the
* same TC. Value must be in range 0 to NUM_QUEUES - 1
* @queue: Queue configuration structure
@@ -2329,7 +2378,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc,
+ uint16_t param,
uint8_t index,
struct dpni_queue *queue,
struct dpni_queue_id *qid)
@@ -2345,8 +2394,9 @@ int dpni_get_queue(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc;
+ cmd_params->tc = (uint8_t)(param & 0xff);
cmd_params->index = index;
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
/* send command to mc */
err = mc_send_command(mc_io, &cmd);
@@ -2382,8 +2432,16 @@ int dpni_get_queue(struct fsl_mc_io *mc_io,
* @token: Token of DPNI object
* @page: Selects the statistics page to retrieve, see
* DPNI_GET_STATISTICS output. Pages are numbered 0 to 6.
- * @param: Custom parameter for some pages used to select
- * a certain statistic source, for example the TC.
+ * @param: Custom parameter for some pages used to select
+ * a certain statistic source, for example the TC.
+ * - page_0: not used
+ * - page_1: not used
+ * - page_2: not used
+ * - page_3: high_byte - channel_id, low_byte - traffic class
+ * - page_4: high_byte - queue_index has meaning only if dpni is
+ * created using option DPNI_OPT_CUSTOM_CG, low_byte - traffic class
+ * - page_5: not used
+ * - page_6: not used
* @stat: Structure containing the statistics
*
* Return: '0' on Success; Error code otherwise.
@@ -2471,7 +2529,7 @@ int dpni_set_taildrop(struct fsl_mc_io *mc_io,
uint16_t token,
enum dpni_congestion_point cg_point,
enum dpni_queue_type qtype,
- uint8_t tc,
+ uint16_t param,
uint8_t index,
struct dpni_taildrop *taildrop)
{
@@ -2485,7 +2543,8 @@ int dpni_set_taildrop(struct fsl_mc_io *mc_io,
cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
cmd_params->congestion_point = cg_point;
cmd_params->qtype = qtype;
- cmd_params->tc = tc;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->index = index;
cmd_params->units = taildrop->units;
cmd_params->threshold = cpu_to_le32(taildrop->threshold);
diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux.h b/drivers/net/dpaa2/mc/fsl_dpdmux.h
index b01a98eb59..4600ea94d4 100644
--- a/drivers/net/dpaa2/mc/fsl_dpdmux.h
+++ b/drivers/net/dpaa2/mc/fsl_dpdmux.h
@@ -184,6 +184,9 @@ struct dpdmux_attr {
uint16_t num_ifs;
uint16_t mem_size;
uint16_t default_if;
+ uint16_t max_dmat_entries;
+ uint16_t max_mc_groups;
+ uint16_t max_vlan_ids;
};
int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h
index f8a1b5b1ae..bf6b8a20d1 100644
--- a/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h
@@ -35,7 +35,7 @@
#define DPDMUX_CMDID_ENABLE DPDMUX_CMD(0x002)
#define DPDMUX_CMDID_DISABLE DPDMUX_CMD(0x003)
-#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD_V2(0x004)
+#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD_V3(0x004)
#define DPDMUX_CMDID_RESET DPDMUX_CMD(0x005)
#define DPDMUX_CMDID_IS_ENABLED DPDMUX_CMD(0x006)
#define DPDMUX_CMDID_SET_MAX_FRAME_LENGTH DPDMUX_CMD(0x0a1)
@@ -119,6 +119,9 @@ struct dpdmux_rsp_get_attr {
uint32_t pad2;
uint64_t options;
+ uint16_t max_dmat_entries;
+ uint16_t max_mc_groups;
+ uint16_t max_vlan_ids;
};
struct dpdmux_cmd_set_max_frame_length {
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
index 469ab9b3d4..8aead28261 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -36,6 +36,10 @@ struct fsl_mc_io;
* Maximum number of storage-profiles per DPNI
*/
#define DPNI_MAX_SP 2
+/**
+ * Maximum number of Tx channels per DPNI
+ */
+#define DPNI_MAX_CHANNELS 16
/**
* All traffic classes considered; see dpni_set_queue()
@@ -117,6 +121,13 @@ struct fsl_mc_io;
*/
#define DPNI_SW_SEQUENCE_LAYOUT_SIZE 33
+/**
+ * Build a parameter from dpni channel and traffic class. This parameter
+ * will be used to configure / query information from dpni objects created
+ * to support multiple channels.
+ */
+#define DPNI_BUILD_PARAM(channel, tc_id) (((channel) << 8) | (tc_id))
+
int dpni_open(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
int dpni_id,
@@ -187,6 +198,8 @@ int dpni_close(struct fsl_mc_io *mc_io,
* field is ignored if the DPNI has a single TC. Otherwise,
* a value of 0 defaults to 64. Maximum supported value
* is 64.
+ * @num_channels: Number of egress channels used by this dpni object. If
+ * set to zero the dpni object will use a single CEETM channel.
*/
struct dpni_cfg {
uint32_t options;
@@ -200,6 +213,7 @@ struct dpni_cfg {
uint8_t num_cgs;
uint16_t num_opr;
uint8_t dist_key_size;
+ uint8_t num_channels;
};
int dpni_create(struct fsl_mc_io *mc_io,
@@ -362,6 +376,7 @@ struct dpni_attr {
uint8_t fs_key_size;
uint16_t wriop_version;
uint8_t num_cgs;
+ uint8_t num_channels;
};
int dpni_get_attributes(struct fsl_mc_io *mc_io,
@@ -779,12 +794,29 @@ struct dpni_tx_shaping_cfg {
uint16_t max_burst_size;
};
+/**
+ * Build the parameter for dpni_set_tx_shaping() call
+ * @oal: Overhead accounting length. 11bit value added to the size of
+ * each frame. Used only for LNI shaping. If set to zero, will use default
+ * value of 24. Ignored if shaping_lni is set to zero.
+ * @shaping_lni: 1 for LNI shaping (configure whole throughput of the dpni object)
+ * 0 for channel shaping (configure shaping for individual channels)
+ * Set to one only if dpni is connected to a dpmac object.
+ * @channel_id: Channel to be configured. Ignored if shaping_lni is set to 1
+ * @coupled: Committed and excess rates are coupled
+ */
+#define DPNI_TX_SHAPING_PARAM(oal, shaping_lni, channel_id, coupled) ( \
+ ((uint32_t)(((oal) & 0x7ff) << 16)) | \
+ ((uint32_t)((channel_id) & 0xff) << 8) | \
+ ((uint32_t)(!!shaping_lni) << 1) | \
+ ((uint32_t)!!coupled))
+
int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
const struct dpni_tx_shaping_cfg *tx_cr_shaper,
const struct dpni_tx_shaping_cfg *tx_er_shaper,
- int coupled);
+ uint32_t param);
int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
@@ -918,12 +950,14 @@ struct dpni_tx_schedule_cfg {
/**
* struct dpni_tx_priorities_cfg - Structure representing transmission
* priorities for DPNI TCs
+ * @channel_idx: channel to perform the configuration
* @tc_sched: An array of traffic-classes
* @prio_group_A: Priority of group A
* @prio_group_B: Priority of group B
* @separate_groups: Treat A and B groups as separate
*/
struct dpni_tx_priorities_cfg {
+ uint8_t channel_idx;
struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
uint32_t prio_group_A;
uint32_t prio_group_B;
@@ -1155,14 +1189,14 @@ int dpni_set_early_drop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
+ uint16_t param,
uint64_t early_drop_iova);
int dpni_get_early_drop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
+ uint16_t param,
uint64_t early_drop_iova);
/**
@@ -1290,15 +1324,15 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
- const struct dpni_congestion_notification_cfg *cfg);
+ uint16_t param,
+ const struct dpni_congestion_notification_cfg *cfg);
int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
- struct dpni_congestion_notification_cfg *cfg);
+ uint16_t param,
+ struct dpni_congestion_notification_cfg *cfg);
/* DPNI FLC stash options */
@@ -1590,7 +1624,7 @@ int dpni_set_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc,
+ uint16_t param,
uint8_t index,
uint8_t options,
const struct dpni_queue *queue);
@@ -1599,7 +1633,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc,
+ uint16_t param,
uint8_t index,
struct dpni_queue *queue,
struct dpni_queue_id *qid);
@@ -1643,7 +1677,7 @@ int dpni_set_taildrop(struct fsl_mc_io *mc_io,
uint16_t token,
enum dpni_congestion_point cg_point,
enum dpni_queue_type q_type,
- uint8_t tc,
+ uint16_t param,
uint8_t q_index,
struct dpni_taildrop *taildrop);
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index 6fbd93bb38..8bff2ec9af 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -8,14 +8,15 @@
#define _FSL_DPNI_CMD_H
/* DPNI Version */
-#define DPNI_VER_MAJOR 7
-#define DPNI_VER_MINOR 17
+#define DPNI_VER_MAJOR 8
+#define DPNI_VER_MINOR 0
#define DPNI_CMD_BASE_VERSION 1
#define DPNI_CMD_VERSION_2 2
#define DPNI_CMD_VERSION_3 3
#define DPNI_CMD_VERSION_4 4
#define DPNI_CMD_VERSION_5 5
+#define DPNI_CMD_VERSION_6 6
#define DPNI_CMD_ID_OFFSET 4
#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
@@ -23,17 +24,18 @@
#define DPNI_CMD_V3(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_3)
#define DPNI_CMD_V4(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_4)
#define DPNI_CMD_V5(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_5)
+#define DPNI_CMD_V6(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_6)
/* Command IDs */
#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
-#define DPNI_CMDID_CREATE DPNI_CMD_V5(0x901)
+#define DPNI_CMDID_CREATE DPNI_CMD_V6(0x901)
#define DPNI_CMDID_DESTROY DPNI_CMD(0x981)
#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
-#define DPNI_CMDID_GET_ATTR DPNI_CMD_V3(0x004)
+#define DPNI_CMDID_GET_ATTR DPNI_CMD_V4(0x004)
#define DPNI_CMDID_RESET DPNI_CMD(0x005)
#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
@@ -54,7 +56,7 @@
#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD_V2(0x21A)
-#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)
+#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V3(0x21B)
#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
@@ -83,25 +85,25 @@
#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
-#define DPNI_CMDID_SET_TX_PRIORITIES DPNI_CMD_V2(0x250)
+#define DPNI_CMDID_SET_TX_PRIORITIES DPNI_CMD_V3(0x250)
#define DPNI_CMDID_GET_RX_TC_POLICING DPNI_CMD(0x251)
-#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V3(0x25D)
+#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V4(0x25D)
#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
-#define DPNI_CMDID_GET_QUEUE DPNI_CMD_V2(0x25F)
-#define DPNI_CMDID_SET_QUEUE DPNI_CMD_V2(0x260)
+#define DPNI_CMDID_GET_QUEUE DPNI_CMD_V3(0x25F)
+#define DPNI_CMDID_SET_QUEUE DPNI_CMD_V3(0x260)
#define DPNI_CMDID_GET_TAILDROP DPNI_CMD_V2(0x261)
-#define DPNI_CMDID_SET_TAILDROP DPNI_CMD_V2(0x262)
+#define DPNI_CMDID_SET_TAILDROP DPNI_CMD_V3(0x262)
#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD_V2(0x264)
#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD_V2(0x265)
-#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD_V2(0x267)
-#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD_V2(0x268)
-#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD_V2(0x269)
-#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD_V2(0x26A)
+#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD_V3(0x267)
+#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD_V3(0x268)
+#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD_V3(0x269)
+#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD_V3(0x26A)
#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
@@ -136,7 +138,7 @@ struct dpni_cmd_create {
uint8_t num_queues;
uint8_t num_tcs;
uint8_t mac_filter_entries;
- uint8_t pad1;
+ uint8_t num_channels;
uint8_t vlan_filter_entries;
uint8_t pad2;
uint8_t qos_entries;
@@ -230,7 +232,7 @@ struct dpni_rsp_get_attr {
uint8_t num_tx_tcs;
/* response word 1 */
uint8_t vlan_filter_entries;
- uint8_t pad1;
+ uint8_t num_channels;
uint8_t qos_entries;
uint8_t pad2;
uint16_t fs_entries;
@@ -367,6 +369,8 @@ struct dpni_rsp_get_link_state {
#define DPNI_COUPLED_SHIFT 0
#define DPNI_COUPLED_SIZE 1
+#define DPNI_LNI_SHAPER_SHIFT 1
+#define DPNI_LNI_SHAPER_SIZE 1
struct dpni_cmd_set_tx_shaping {
uint16_t tx_cr_max_burst_size;
@@ -374,8 +378,10 @@ struct dpni_cmd_set_tx_shaping {
uint32_t pad;
uint32_t tx_cr_rate_limit;
uint32_t tx_er_rate_limit;
- /* from LSB: coupled:1 */
- uint8_t coupled;
+ /* from LSB: coupled:1, lni_shaper: 1*/
+ uint8_t options;
+ uint8_t channel_id;
+ uint16_t oal;
};
struct dpni_cmd_set_max_frame_length {
@@ -466,7 +472,8 @@ struct dpni_cmd_set_tx_priorities {
uint16_t flags;
uint8_t prio_group_A;
uint8_t prio_group_B;
- uint32_t pad0;
+ uint8_t channel_idx;
+ uint8_t pad0[3];
uint8_t modes[4];
uint32_t pad1;
uint64_t pad2;
@@ -499,6 +506,7 @@ struct dpni_cmd_get_queue {
uint8_t qtype;
uint8_t tc;
uint8_t index;
+ uint8_t channel_id;
};
#define DPNI_DEST_TYPE_SHIFT 0
@@ -551,6 +559,7 @@ struct dpni_cmd_set_queue {
uint64_t user_context;
/* cmd word 4 */
uint8_t cgid;
+ uint8_t channel_id;
};
#define DPNI_DISCARD_ON_MISS_SHIFT 0
@@ -683,7 +692,8 @@ struct dpni_early_drop {
struct dpni_cmd_early_drop {
uint8_t qtype;
uint8_t tc;
- uint8_t pad[6];
+ uint8_t channel_id;
+ uint8_t pad[5];
uint64_t early_drop_iova;
};
@@ -723,7 +733,8 @@ struct dpni_cmd_set_taildrop {
uint8_t qtype;
uint8_t tc;
uint8_t index;
- uint32_t pad0;
+ uint8_t channel_id;
+ uint8_t pad0[3];
/* cmd word 1 */
/* from LSB: enable:1 oal_lo:7 */
uint8_t enable_oal_lo;
@@ -747,7 +758,7 @@ struct dpni_tx_confirmation_mode {
struct dpni_cmd_set_congestion_notification {
uint8_t qtype;
uint8_t tc;
- uint8_t pad;
+ uint8_t channel_id;
uint8_t congestion_point;
uint8_t cgid;
uint8_t pad2[3];
@@ -765,7 +776,7 @@ struct dpni_cmd_set_congestion_notification {
struct dpni_cmd_get_congestion_notification {
uint8_t qtype;
uint8_t tc;
- uint8_t pad;
+ uint8_t channel_id;
uint8_t congestion_point;
uint8_t cgid;
};
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 02/17] bus/fslmc: use dmb oshst for synchronization before I/O
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
2021-12-06 12:18 ` [PATCH 01/17] bus/fslmc: update MC to 10.29 nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 03/17] net/dpaa2: warn user in case of high nb desc nipun.gupta
` (16 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Nipun Gupta
From: Nipun Gupta <nipun.gupta@nxp.com>
Outer Shareable Store (oshst) is sufficient for Data Memory
Barrier (dmb) when doing IO on the interface via QBMAN.
This will sync L3/DDR with the L1/L2 cached data.
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/bus/fslmc/qbman/include/compat.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/bus/fslmc/qbman/include/compat.h b/drivers/bus/fslmc/qbman/include/compat.h
index a4471a80af..ece5da5906 100644
--- a/drivers/bus/fslmc/qbman/include/compat.h
+++ b/drivers/bus/fslmc/qbman/include/compat.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2008-2016 Freescale Semiconductor, Inc.
- * Copyright 2017 NXP
+ * Copyright 2017,2021 NXP
*
*/
@@ -81,7 +81,7 @@ do { \
#define __raw_readl(p) (*(const volatile unsigned int *)(p))
#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
-#define dma_wmb() rte_smp_mb()
+#define dma_wmb() rte_io_wmb()
#define atomic_t rte_atomic32_t
#define atomic_read(v) rte_atomic32_read(v)
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 03/17] net/dpaa2: warn user in case of high nb desc
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
2021-12-06 12:18 ` [PATCH 01/17] bus/fslmc: update MC to 10.29 nipun.gupta
2021-12-06 12:18 ` [PATCH 02/17] bus/fslmc: use dmb oshst for synchronization before I/O nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 04/17] net/dpaa2: fix unregistering interrupt handler nipun.gupta
` (15 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Rohit Raj
From: Rohit Raj <rohit.raj@nxp.com>
Added warning message if application is configuring nb_desc
more than supported by PEB memory suggesting user to configure
HW descriptors in normal memory rather than in faster PEB
memory.
Signed-off-by: Rohit Raj <rohit.raj@nxp.com>
---
drivers/net/dpaa2/dpaa2_ethdev.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index a3706439d5..f5cac8f9d9 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -74,6 +74,9 @@ int dpaa2_timestamp_dynfield_offset = -1;
/* Enable error queue */
bool dpaa2_enable_err_queue;
+#define MAX_NB_RX_DESC 11264
+int total_nb_rx_desc;
+
struct rte_dpaa2_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
uint8_t page_id; /* dpni statistics page id */
@@ -694,6 +697,13 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
dev, rx_queue_id, mb_pool, rx_conf);
+ total_nb_rx_desc += nb_rx_desc;
+ if (total_nb_rx_desc > MAX_NB_RX_DESC) {
+ DPAA2_PMD_WARN("\nTotal nb_rx_desc exceeds %d limit. Please use Normal buffers",
+ MAX_NB_RX_DESC);
+ DPAA2_PMD_WARN("To use Normal buffers, run 'export DPNI_NORMAL_BUF=1' before running dynamic_dpl.sh script");
+ }
+
/* Rx deferred start is not supported */
if (rx_conf->rx_deferred_start) {
DPAA2_PMD_ERR("%p:Rx deferred start not supported",
@@ -984,6 +994,9 @@ dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
memset(&cfg, 0, sizeof(struct dpni_queue));
PMD_INIT_FUNC_TRACE();
+
+ total_nb_rx_desc -= dpaa2_q->nb_desc;
+
if (dpaa2_q->cgid != 0xff) {
options = DPNI_QUEUE_OPT_CLEAR_CGID;
cfg.cgid = dpaa2_q->cgid;
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 04/17] net/dpaa2: fix unregistering interrupt handler
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (2 preceding siblings ...)
2021-12-06 12:18 ` [PATCH 03/17] net/dpaa2: warn user in case of high nb desc nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 05/17] net/dpaa2: fix timestamping for IEEE1588 nipun.gupta
` (14 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stable, Vanshika Shukla
From: Vanshika Shukla <vanshika.shukla@nxp.com>
This patch fixes the code that unregisters the LSC
interrupt handler in the dpaa2_dev_stop API.
Fixes: c5acbb5ea20e ("net/dpaa2: support link status event")
Cc: stable@dpdk.org
Signed-off-by: Vanshika Shukla <vanshika.shukla@nxp.com>
---
drivers/net/dpaa2/dpaa2_ethdev.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index f5cac8f9d9..18ff07249f 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -1265,7 +1265,12 @@ dpaa2_dev_stop(struct rte_eth_dev *dev)
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
int ret;
struct rte_eth_link link;
- struct rte_intr_handle *intr_handle = dev->intr_handle;
+ struct rte_device *rdev = dev->device;
+ struct rte_intr_handle *intr_handle;
+ struct rte_dpaa2_device *dpaa2_dev;
+
+ dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
+ intr_handle = dpaa2_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 05/17] net/dpaa2: fix timestamping for IEEE1588
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (3 preceding siblings ...)
2021-12-06 12:18 ` [PATCH 04/17] net/dpaa2: fix unregistering interrupt handler nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 06/17] net/dpaa2: support multiple txqs en-queue for ordered nipun.gupta
` (13 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stable, Vanshika Shukla
From: Vanshika Shukla <vanshika.shukla@nxp.com>
The current implementation of the DPAA2 driver records Rx and Tx
timestamps for PTP without checking whether the packets are actually
PTP packets. For packets in which RTE_MBUF_F_RX_IEEE1588_TMST or
RTE_MBUF_F_TX_IEEE1588_TMST is not set, the Rx and Tx timestamps
should not be recorded.
This patch fixes the issue by checking that the required flags
are set in the mbuf before recording timestamps.
This change also defines separate values for
DPAA2_TX_CONF_ENABLE and DPAA2_NO_PREFETCH_RX.
Fixes: e806bf878c17 ("net/dpaa2: support timestamp")
Cc: stable@dpdk.org
Signed-off-by: Vanshika Shukla <vanshika.shukla@nxp.com>
---
drivers/net/dpaa2/dpaa2_ethdev.h | 2 +-
drivers/net/dpaa2/dpaa2_ptp.c | 8 ++++---
drivers/net/dpaa2/dpaa2_rxtx.c | 39 +++++++++++++++++++++++++-------
3 files changed, 37 insertions(+), 12 deletions(-)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index c5e9267bf0..c21571e63d 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -62,7 +62,7 @@
/* Disable RX tail drop, default is enable */
#define DPAA2_RX_TAILDROP_OFF 0x04
/* Tx confirmation enabled */
-#define DPAA2_TX_CONF_ENABLE 0x08
+#define DPAA2_TX_CONF_ENABLE 0x06
#define DPAA2_RSS_OFFLOAD_ALL ( \
RTE_ETH_RSS_L2_PAYLOAD | \
diff --git a/drivers/net/dpaa2/dpaa2_ptp.c b/drivers/net/dpaa2/dpaa2_ptp.c
index 8d79e39244..3a4536dd69 100644
--- a/drivers/net/dpaa2/dpaa2_ptp.c
+++ b/drivers/net/dpaa2/dpaa2_ptp.c
@@ -111,10 +111,12 @@ int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- if (priv->next_tx_conf_queue)
- dpaa2_dev_tx_conf(priv->next_tx_conf_queue);
- else
+ if (priv->next_tx_conf_queue) {
+ while (!priv->tx_timestamp)
+ dpaa2_dev_tx_conf(priv->next_tx_conf_queue);
+ } else {
return -1;
+ }
*timestamp = rte_ns_to_timespec(priv->tx_timestamp);
return 0;
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index c65589a5f3..ee3ed1b152 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -140,8 +140,10 @@ dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
annotation->word3, annotation->word4);
#if defined(RTE_LIBRTE_IEEE1588)
- if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
+ if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) {
mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
+ }
#endif
if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
@@ -769,7 +771,10 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
else
bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
#if defined(RTE_LIBRTE_IEEE1588)
- priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
+ if (bufs[num_rx]->ol_flags & PKT_RX_IEEE1588_TMST) {
+ priv->rx_timestamp =
+ *dpaa2_timestamp_dynfield(bufs[num_rx]);
+ }
#endif
if (eth_data->dev_conf.rxmode.offloads &
@@ -986,6 +991,13 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
bufs[num_rx] = eth_fd_to_mbuf(fd,
eth_data->port_id);
+#if defined(RTE_LIBRTE_IEEE1588)
+ if (bufs[num_rx]->ol_flags & PKT_RX_IEEE1588_TMST) {
+ priv->rx_timestamp =
+ *dpaa2_timestamp_dynfield(bufs[num_rx]);
+ }
+#endif
+
if (eth_data->dev_conf.rxmode.offloads &
RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
rte_vlan_strip(bufs[num_rx]);
@@ -1021,6 +1033,8 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
struct dpaa2_dev_priv *priv = eth_data->dev_private;
struct dpaa2_annot_hdr *annotation;
+ void *v_addr;
+ struct rte_mbuf *mbuf;
#endif
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
@@ -1105,10 +1119,16 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
num_tx_conf++;
num_pulled++;
#if defined(RTE_LIBRTE_IEEE1588)
- annotation = (struct dpaa2_annot_hdr *)((size_t)
- DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
- DPAA2_FD_PTA_SIZE);
- priv->tx_timestamp = annotation->word2;
+ v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+ mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+
+ if (mbuf->ol_flags & PKT_TX_IEEE1588_TMST) {
+ annotation = (struct dpaa2_annot_hdr *)((size_t)
+ DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
+ DPAA2_FD_PTA_SIZE);
+ priv->tx_timestamp = annotation->word2;
+ }
#endif
} while (pending);
@@ -1184,8 +1204,11 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
* corresponding to last packet transmitted for reading
* the timestamp
*/
- priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
- dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
+ if ((*bufs)->ol_flags & PKT_TX_IEEE1588_TMST) {
+ priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
+ dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
+ priv->tx_timestamp = 0;
+ }
#endif
/*Prepare enqueue descriptor*/
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 06/17] net/dpaa2: support multiple txqs en-queue for ordered
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (4 preceding siblings ...)
2021-12-06 12:18 ` [PATCH 05/17] net/dpaa2: fix timestamping for IEEE1588 nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 07/17] net/dpaa2: add support for level 2 in traffic management nipun.gupta
` (12 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Jun Yang
From: Jun Yang <jun.yang@nxp.com>
Support Tx enqueue in ordered queue mode, where the queue id
for each event may differ.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
drivers/event/dpaa2/dpaa2_eventdev.c | 12 ++-
drivers/net/dpaa2/dpaa2_ethdev.h | 4 +
drivers/net/dpaa2/dpaa2_rxtx.c | 142 +++++++++++++++++++++++++++
drivers/net/dpaa2/version.map | 1 +
4 files changed, 155 insertions(+), 4 deletions(-)
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 4d94c315d2..f3d8a7e4f1 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017,2019 NXP
+ * Copyright 2017,2019-2021 NXP
*/
#include <assert.h>
@@ -1003,16 +1003,20 @@ dpaa2_eventdev_txa_enqueue(void *port,
struct rte_event ev[],
uint16_t nb_events)
{
- struct rte_mbuf *m = (struct rte_mbuf *)ev[0].mbuf;
+ void *txq[32];
+ struct rte_mbuf *m[32];
uint8_t qid, i;
RTE_SET_USED(port);
for (i = 0; i < nb_events; i++) {
- qid = rte_event_eth_tx_adapter_txq_get(m);
- rte_eth_tx_burst(m->port, qid, &m, 1);
+ m[i] = (struct rte_mbuf *)ev[i].mbuf;
+ qid = rte_event_eth_tx_adapter_txq_get(m[i]);
+ txq[i] = rte_eth_devices[m[i]->port].data->tx_queues[qid];
}
+ dpaa2_dev_tx_multi_txq_ordered(txq, m, nb_events);
+
return nb_events;
}
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index c21571e63d..e001a7e49d 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -241,6 +241,10 @@ void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts);
+__rte_internal
+uint16_t dpaa2_dev_tx_multi_txq_ordered(void **queue,
+ struct rte_mbuf **bufs, uint16_t nb_pkts);
+
uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
void dpaa2_flow_clean(struct rte_eth_dev *dev);
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index ee3ed1b152..1096b1cf1d 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -1468,6 +1468,148 @@ dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
}
+uint16_t
+dpaa2_dev_tx_multi_txq_ordered(void **queue,
+ struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ /* Function to transmit the frames to multiple queues respectively.*/
+ uint32_t loop, retry_count;
+ int32_t ret;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t frames_to_send;
+ struct rte_mempool *mp;
+ struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+ struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
+ struct qbman_swp *swp;
+ uint16_t bpid;
+ struct rte_mbuf *mi;
+ struct rte_eth_dev_data *eth_data;
+ struct dpaa2_dev_priv *priv;
+ struct dpaa2_queue *order_sendq;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ for (loop = 0; loop < nb_pkts; loop++) {
+ dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
+ eth_data = dpaa2_q[loop]->eth_data;
+ priv = eth_data->dev_private;
+ qbman_eq_desc_clear(&eqdesc[loop]);
+ if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
+ order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
+ dpaa2_set_enqueue_descriptor(order_sendq,
+ (*bufs),
+ &eqdesc[loop]);
+ } else {
+ qbman_eq_desc_set_no_orp(&eqdesc[loop],
+ DPAA2_EQ_RESP_ERR_FQ);
+ qbman_eq_desc_set_fq(&eqdesc[loop],
+ dpaa2_q[loop]->fqid);
+ }
+
+ retry_count = 0;
+ while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
+ retry_count++;
+ /* Retry for some time before giving up */
+ if (retry_count > CONG_RETRY_COUNT)
+ goto send_frames;
+ }
+
+ if (likely(RTE_MBUF_DIRECT(*bufs))) {
+ mp = (*bufs)->pool;
+ /* Check the basic scenario and set
+ * the FD appropriately here itself.
+ */
+ if (likely(mp && mp->ops_index ==
+ priv->bp_list->dpaa2_ops_index &&
+ (*bufs)->nb_segs == 1 &&
+ rte_mbuf_refcnt_read((*bufs)) == 1)) {
+ if (unlikely((*bufs)->ol_flags
+ & RTE_MBUF_F_TX_VLAN)) {
+ ret = rte_vlan_insert(bufs);
+ if (ret)
+ goto send_frames;
+ }
+ DPAA2_MBUF_TO_CONTIG_FD((*bufs),
+ &fd_arr[loop],
+ mempool_to_bpid(mp));
+ bufs++;
+ dpaa2_q[loop]++;
+ continue;
+ }
+ } else {
+ mi = rte_mbuf_from_indirect(*bufs);
+ mp = mi->pool;
+ }
+ /* Not a hw_pkt pool allocated frame */
+ if (unlikely(!mp || !priv->bp_list)) {
+ DPAA2_PMD_ERR("Err: No buffer pool attached");
+ goto send_frames;
+ }
+
+ if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
+ DPAA2_PMD_WARN("Non DPAA2 buffer pool");
+ /* alloc should be from the default buffer pool
+ * attached to this interface
+ */
+ bpid = priv->bp_list->buf_pool.bpid;
+
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ DPAA2_PMD_ERR(
+ "S/G not supp for non hw offload buffer");
+ goto send_frames;
+ }
+ if (eth_copy_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid)) {
+ goto send_frames;
+ }
+ /* free the original packet */
+ rte_pktmbuf_free(*bufs);
+ } else {
+ bpid = mempool_to_bpid(mp);
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ if (eth_mbuf_to_sg_fd(*bufs,
+ &fd_arr[loop],
+ mp,
+ bpid))
+ goto send_frames;
+ } else {
+ eth_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid);
+ }
+ }
+
+ bufs++;
+ dpaa2_q[loop]++;
+ }
+
+send_frames:
+ frames_to_send = loop;
+ loop = 0;
+ while (loop < frames_to_send) {
+ ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
+ &fd_arr[loop],
+ frames_to_send - loop);
+ if (likely(ret > 0)) {
+ loop += ret;
+ } else {
+ retry_count++;
+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
+ break;
+ }
+ }
+
+ return loop;
+}
+
/* Callback to handle sending ordered packets through WRIOP based interface */
uint16_t
dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
diff --git a/drivers/net/dpaa2/version.map b/drivers/net/dpaa2/version.map
index 2fe61f3442..cc82b8579d 100644
--- a/drivers/net/dpaa2/version.map
+++ b/drivers/net/dpaa2/version.map
@@ -21,6 +21,7 @@ EXPERIMENTAL {
INTERNAL {
global:
+ dpaa2_dev_tx_multi_txq_ordered;
dpaa2_eth_eventq_attach;
dpaa2_eth_eventq_detach;
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 07/17] net/dpaa2: add support for level 2 in traffic management
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (5 preceding siblings ...)
2021-12-06 12:18 ` [PATCH 06/17] net/dpaa2: support multiple txqs en-queue for ordered nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 08/17] net/dpaa2: secondary process handling for dpni nipun.gupta
` (11 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Gagandeep Singh
From: Gagandeep Singh <g.singh@nxp.com>
This patch adds support for level 2 for QoS shaping.
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
doc/guides/nics/dpaa2.rst | 2 +-
drivers/net/dpaa2/dpaa2_ethdev.c | 55 ++-
drivers/net/dpaa2/dpaa2_ethdev.h | 6 +-
drivers/net/dpaa2/dpaa2_tm.c | 563 ++++++++++++++++++++++------
drivers/net/dpaa2/dpaa2_tm.h | 17 +-
drivers/net/dpaa2/mc/dpni.c | 302 +++++++++------
drivers/net/dpaa2/mc/fsl_dpni.h | 119 +++---
drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 79 ++--
8 files changed, 791 insertions(+), 352 deletions(-)
diff --git a/doc/guides/nics/dpaa2.rst b/doc/guides/nics/dpaa2.rst
index 831bc56488..2d113f53df 100644
--- a/doc/guides/nics/dpaa2.rst
+++ b/doc/guides/nics/dpaa2.rst
@@ -588,7 +588,7 @@ Supported Features
The following capabilities are supported:
-- Level0 (root node) and Level1 are supported.
+- Level0 (root node), Level1 and Level2 are supported.
- 1 private shaper at root node (port level) is supported.
- 8 TX queues per port supported (1 channel per port)
- Both SP and WFQ scheduling mechanisms are supported on all 8 queues.
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 18ff07249f..b91e773605 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -852,6 +852,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
struct dpni_queue tx_conf_cfg;
struct dpni_queue tx_flow_cfg;
uint8_t options = 0, flow_id;
+ uint16_t channel_id;
struct dpni_queue_id qid;
uint32_t tc_id;
int ret;
@@ -877,20 +878,6 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
- tc_id = tx_queue_id;
- flow_id = 0;
-
- ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
- tc_id, flow_id, options, &tx_flow_cfg);
- if (ret) {
- DPAA2_PMD_ERR("Error in setting the tx flow: "
- "tc_id=%d, flow=%d err=%d",
- tc_id, flow_id, ret);
- return -1;
- }
-
- dpaa2_q->flow_id = flow_id;
-
if (tx_queue_id == 0) {
/*Set tx-conf and error configuration*/
if (priv->flags & DPAA2_TX_CONF_ENABLE)
@@ -907,10 +894,26 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
return -1;
}
}
+
+ tc_id = tx_queue_id % priv->num_tx_tc;
+ channel_id = (uint8_t)(tx_queue_id / priv->num_tx_tc) % priv->num_channels;
+ flow_id = 0;
+
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
+ ((channel_id << 8) | tc_id), flow_id, options, &tx_flow_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error in setting the tx flow: "
+ "tc_id=%d, flow=%d err=%d",
+ tc_id, flow_id, ret);
+ return -1;
+ }
+
+ dpaa2_q->flow_id = flow_id;
+
dpaa2_q->tc_index = tc_id;
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX, dpaa2_q->tc_index,
+ DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index),
dpaa2_q->flow_id, &tx_flow_cfg, &qid);
if (ret) {
DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
@@ -942,7 +945,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
priv->token,
DPNI_QUEUE_TX,
- tc_id,
+ ((channel_id << 8) | tc_id),
&cong_notif_cfg);
if (ret) {
DPAA2_PMD_ERR(
@@ -959,7 +962,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
options = options | DPNI_QUEUE_OPT_USER_CTX;
tx_conf_cfg.user_context = (size_t)(dpaa2_q);
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+ DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
if (ret) {
DPAA2_PMD_ERR("Error in setting the tx conf flow: "
@@ -970,7 +973,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
}
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+ DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
if (ret) {
DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
@@ -1152,7 +1155,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
struct dpni_queue cfg;
struct dpni_error_cfg err_cfg;
- uint16_t qdid;
struct dpni_queue_id qid;
struct dpaa2_queue *dpaa2_q;
int ret, i;
@@ -1162,7 +1164,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
intr_handle = dpaa2_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
-
ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
@@ -1173,14 +1174,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
/* Power up the phy. Needed to make the link go UP */
dpaa2_dev_set_link_up(dev);
- ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX, &qdid);
- if (ret) {
- DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
- return ret;
- }
- priv->qdid = qdid;
-
for (i = 0; i < data->nb_rx_queues; i++) {
dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
@@ -2619,9 +2612,12 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
}
priv->num_rx_tc = attr.num_rx_tcs;
+ priv->num_tx_tc = attr.num_tx_tcs;
priv->qos_entries = attr.qos_entries;
priv->fs_entries = attr.fs_entries;
priv->dist_queues = attr.num_queues;
+ priv->num_channels = attr.num_channels;
+ priv->channel_inuse = 0;
/* only if the custom CG is enabled */
if (attr.options & DPNI_OPT_CUSTOM_CG)
@@ -2635,8 +2631,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
for (i = 0; i < attr.num_rx_tcs; i++)
priv->nb_rx_queues += attr.num_queues;
- /* Using number of TX queues as number of TX TCs */
- priv->nb_tx_queues = attr.num_tx_tcs;
+ priv->nb_tx_queues = attr.num_tx_tcs * attr.num_channels;
DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
priv->num_rx_tc, priv->nb_rx_queues,
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index e001a7e49d..1fc2fc367e 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -25,6 +25,7 @@
#define MAX_RX_QUEUES 128
#define MAX_TX_QUEUES 16
#define MAX_DPNI 8
+#define DPAA2_MAX_CHANNELS 16
#define DPAA2_RX_DEFAULT_NBDESC 512
@@ -160,15 +161,17 @@ struct dpaa2_dev_priv {
void *rx_vq[MAX_RX_QUEUES];
void *tx_vq[MAX_TX_QUEUES];
struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
- void *tx_conf_vq[MAX_TX_QUEUES];
+ void *tx_conf_vq[MAX_TX_QUEUES * DPAA2_MAX_CHANNELS];
void *rx_err_vq;
uint8_t flags; /*dpaa2 config flags */
uint8_t max_mac_filters;
uint8_t max_vlan_filters;
uint8_t num_rx_tc;
+ uint8_t num_tx_tc;
uint16_t qos_entries;
uint16_t fs_entries;
uint8_t dist_queues;
+ uint8_t num_channels;
uint8_t en_ordered;
uint8_t en_loose_ordered;
uint8_t max_cgs;
@@ -190,6 +193,7 @@ struct dpaa2_dev_priv {
struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
+ uint8_t channel_inuse;
LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
LIST_HEAD(nodes, dpaa2_tm_node) nodes;
LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles;
diff --git a/drivers/net/dpaa2/dpaa2_tm.c b/drivers/net/dpaa2/dpaa2_tm.c
index f5faaedfb4..8fe5bfa013 100644
--- a/drivers/net/dpaa2/dpaa2_tm.c
+++ b/drivers/net/dpaa2/dpaa2_tm.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2020 NXP
+ * Copyright 2020-2021 NXP
*/
#include <rte_ethdev.h>
@@ -7,12 +7,16 @@
#include <rte_tm_driver.h>
#include "dpaa2_ethdev.h"
+#include "dpaa2_pmd_logs.h"
+#include <dpaa2_hw_dpio.h>
#define DPAA2_BURST_MAX (64 * 1024)
#define DPAA2_SHAPER_MIN_RATE 0
#define DPAA2_SHAPER_MAX_RATE 107374182400ull
#define DPAA2_WEIGHT_MAX 24701
+#define DPAA2_PKT_ADJUST_LEN_MIN 0
+#define DPAA2_PKT_ADJUST_LEN_MAX 0x7ff
int
dpaa2_tm_init(struct rte_eth_dev *dev)
@@ -66,6 +70,8 @@ dpaa2_capabilities_get(struct rte_eth_dev *dev,
struct rte_tm_capabilities *cap,
struct rte_tm_error *error)
{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
if (!cap)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED,
@@ -73,27 +79,31 @@ dpaa2_capabilities_get(struct rte_eth_dev *dev,
memset(cap, 0, sizeof(*cap));
- /* root node(port) + txqs number, assuming each TX
+ /* root node(port) + channels + txqs number, assuming each TX
* Queue is mapped to each TC
*/
- cap->n_nodes_max = 1 + dev->data->nb_tx_queues;
- cap->n_levels_max = 2; /* port level + txqs level */
+ cap->n_nodes_max = 1 + priv->num_channels + dev->data->nb_tx_queues;
+ cap->n_levels_max = MAX_LEVEL;
cap->non_leaf_nodes_identical = 1;
cap->leaf_nodes_identical = 1;
- cap->shaper_n_max = 1;
- cap->shaper_private_n_max = 1;
- cap->shaper_private_dual_rate_n_max = 1;
+ cap->shaper_n_max = 1 + priv->num_channels; /* LNI + channels */
+ cap->shaper_private_n_max = 1 + priv->num_channels;
+ cap->shaper_private_dual_rate_n_max = 1 + priv->num_channels;
cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
+ cap->shaper_pkt_length_adjust_min = DPAA2_PKT_ADJUST_LEN_MIN;
+ cap->shaper_pkt_length_adjust_max = DPAA2_PKT_ADJUST_LEN_MAX;
- cap->sched_n_children_max = dev->data->nb_tx_queues;
- cap->sched_sp_n_priorities_max = dev->data->nb_tx_queues;
- cap->sched_wfq_n_children_per_group_max = dev->data->nb_tx_queues;
- cap->sched_wfq_n_groups_max = 2;
- cap->sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
+ if (priv->num_channels > DPNI_MAX_TC)
+ cap->sched_n_children_max = priv->num_channels;
+ else
+ cap->sched_n_children_max = DPNI_MAX_TC;
- cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_STATS;
+ cap->sched_sp_n_priorities_max = DPNI_MAX_TC;
+ cap->sched_wfq_n_children_per_group_max = DPNI_MAX_TC;
+ cap->sched_wfq_n_groups_max = 2;
+ cap->sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
return 0;
@@ -105,6 +115,8 @@ dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
struct rte_tm_level_capabilities *cap,
struct rte_tm_error *error)
{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
if (!cap)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED,
@@ -112,12 +124,12 @@ dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
memset(cap, 0, sizeof(*cap));
- if (level_id > 1)
+ if (level_id > QUEUE_LEVEL)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_LEVEL_ID,
NULL, "Wrong level id\n");
- if (level_id == 0) { /* Root node */
+ if (level_id == LNI_LEVEL) { /* Root node (LNI) */
cap->n_nodes_max = 1;
cap->n_nodes_nonleaf_max = 1;
cap->non_leaf_nodes_identical = 1;
@@ -127,20 +139,39 @@ dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
cap->nonleaf.shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
cap->nonleaf.shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
- cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
+ cap->nonleaf.sched_n_children_max = priv->num_channels; /* no. of channels */
cap->nonleaf.sched_sp_n_priorities_max = 1;
- cap->nonleaf.sched_wfq_n_children_per_group_max =
- dev->data->nb_tx_queues;
- cap->nonleaf.sched_wfq_n_groups_max = 2;
- cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = 1;
cap->nonleaf.stats_mask = RTE_TM_STATS_N_PKTS |
RTE_TM_STATS_N_BYTES;
+ } else if (level_id == CHANNEL_LEVEL) { /* channels */
+ cap->n_nodes_max = priv->num_channels;
+ cap->n_nodes_nonleaf_max = priv->num_channels;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = 1;
+ cap->nonleaf.shaper_private_dual_rate_supported = 1;
+ cap->nonleaf.shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
+ cap->nonleaf.shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
+
+ /* no. of class queues per channel */
+ cap->nonleaf.sched_n_children_max = priv->num_tx_tc;
+ cap->nonleaf.sched_sp_n_priorities_max = priv->num_tx_tc;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = priv->num_tx_tc;
+ cap->nonleaf.sched_wfq_n_groups_max = 2;
+ cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
} else { /* leaf nodes */
- cap->n_nodes_max = dev->data->nb_tx_queues;
- cap->n_nodes_leaf_max = dev->data->nb_tx_queues;
+ /* queues per channels * channel */
+ cap->n_nodes_max = priv->num_tx_tc * priv->num_channels;
+ cap->n_nodes_leaf_max = priv->num_tx_tc * priv->num_channels;
cap->leaf_nodes_identical = 1;
- cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS;
+ cap->leaf.shaper_private_supported = 0;
+ cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
}
return 0;
@@ -167,18 +198,33 @@ dpaa2_node_capabilities_get(struct rte_eth_dev *dev, uint32_t node_id,
RTE_TM_ERROR_TYPE_NODE_ID,
NULL, "Node id does not exist\n");
- if (node->type == 0) {
+ if (node->level_id == LNI_LEVEL) {
cap->shaper_private_supported = 1;
+ cap->shaper_private_dual_rate_supported = 1;
+ cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
+ cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
- cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
+ cap->nonleaf.sched_n_children_max = priv->num_channels;
cap->nonleaf.sched_sp_n_priorities_max = 1;
- cap->nonleaf.sched_wfq_n_children_per_group_max =
- dev->data->nb_tx_queues;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
+ } else if (node->level_id == CHANNEL_LEVEL) {
+ cap->shaper_private_supported = 1;
+ cap->shaper_private_dual_rate_supported = 1;
+ cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
+ cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
+
+ cap->nonleaf.sched_n_children_max = priv->num_tx_tc;
+ cap->nonleaf.sched_sp_n_priorities_max = priv->num_tx_tc;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = priv->num_tx_tc;
cap->nonleaf.sched_wfq_n_groups_max = 2;
- cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
- cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+ cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
} else {
- cap->stats_mask = RTE_TM_STATS_N_PKTS;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
}
return 0;
@@ -202,7 +248,7 @@ dpaa2_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, int *is_leaf,
RTE_TM_ERROR_TYPE_NODE_ID,
NULL, "Node id does not exist\n");
- *is_leaf = node->type == 1/*NODE_QUEUE*/ ? 1 : 0;
+ *is_leaf = node->type == LEAF_NODE ? 1 : 0;
return 0;
}
@@ -257,6 +303,13 @@ dpaa2_shaper_profile_add(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
NULL, "Wrong shaper profile id\n");
+ if (params->pkt_length_adjust > DPAA2_PKT_ADJUST_LEN_MAX ||
+ params->pkt_length_adjust < DPAA2_PKT_ADJUST_LEN_MIN)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ "Not supported pkt adjust length\n");
+
profile = dpaa2_shaper_profile_from_id(priv, shaper_profile_id);
if (profile)
return -rte_tm_error_set(error, EEXIST,
@@ -318,7 +371,7 @@ dpaa2_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
RTE_TM_ERROR_TYPE_NODE_WEIGHT,
NULL, "Weight is out of range\n");
- if (level_id != 0 && level_id != 1)
+ if (level_id > QUEUE_LEVEL)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_LEVEL_ID,
NULL, "Wrong level id\n");
@@ -338,39 +391,38 @@ dpaa2_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
NULL, "Shared shaper is not supported\n");
- /* verify port (root node) settings */
+ /* verify non leaf nodes settings */
if (node_id >= dev->data->nb_tx_queues) {
if (params->nonleaf.wfq_weight_mode)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
NULL, "WFQ weight mode is not supported\n");
-
- if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
- RTE_TM_STATS_N_BYTES))
+ } else {
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
return -rte_tm_error_set(error, EINVAL,
- RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
- NULL,
- "Requested port stats are not supported\n");
-
- return 0;
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL, "Private shaper not supported on leaf\n");
}
- if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
- return -rte_tm_error_set(error, EINVAL,
- RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
- NULL, "Private shaper not supported on leaf\n");
-
- if (params->stats_mask & ~RTE_TM_STATS_N_PKTS)
- return -rte_tm_error_set(error, EINVAL,
- RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
- NULL,
- "Requested stats are not supported\n");
/* check leaf node */
- if (level_id == 1) {
+ if (level_id == QUEUE_LEVEL) {
if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP)
return -rte_tm_error_set(error, ENODEV,
RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
NULL, "Only taildrop is supported\n");
+ if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES))
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ "Requested port stats are not supported\n");
+ } else if (level_id == LNI_LEVEL) {
+ if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES))
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ "Requested port stats are not supported\n");
}
return 0;
@@ -407,7 +459,7 @@ dpaa2_node_add(struct rte_eth_dev *dev, uint32_t node_id,
}
if (parent_node_id == RTE_TM_NODE_ID_NULL) {
LIST_FOREACH(node, &priv->nodes, next) {
- if (node->type != 0 /*root node*/)
+ if (node->level_id != LNI_LEVEL)
continue;
return -rte_tm_error_set(error, EINVAL,
@@ -435,14 +487,29 @@ dpaa2_node_add(struct rte_eth_dev *dev, uint32_t node_id,
NULL, NULL);
node->id = node_id;
- node->type = parent_node_id == RTE_TM_NODE_ID_NULL ? 0/*NODE_PORT*/ :
- 1/*NODE_QUEUE*/;
+
+ if (node_id > dev->data->nb_tx_queues)
+ node->type = NON_LEAF_NODE;
+ else
+ node->type = LEAF_NODE;
+
+ node->level_id = level_id;
+ if (node->level_id == CHANNEL_LEVEL) {
+ if (priv->channel_inuse < priv->num_channels) {
+ node->channel_id = priv->channel_inuse;
+ priv->channel_inuse++;
+ } else {
+ printf("error no channel id available\n");
+ }
+ }
if (parent) {
node->parent = parent;
parent->refcnt++;
}
+ /* TODO: add check if refcnt is more than supported children */
+
if (profile) {
node->profile = profile;
profile->refcnt++;
@@ -464,6 +531,7 @@ dpaa2_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct dpaa2_tm_node *node;
+ /* XXX: update it */
if (0) {
return -rte_tm_error_set(error, EPERM,
RTE_TM_ERROR_TYPE_UNSPECIFIED,
@@ -493,119 +561,326 @@ dpaa2_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
return 0;
}
+static int
+dpaa2_tm_configure_queue(struct rte_eth_dev *dev, struct dpaa2_tm_node *node)
+{
+ int ret = 0;
+ uint32_t tc_id;
+ uint8_t flow_id, options = 0;
+ struct dpni_queue tx_flow_cfg;
+ struct dpni_queue_id qid;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_queue *dpaa2_q;
+
+ memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
+ dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id];
+ tc_id = node->parent->tc_id;
+ node->parent->tc_id++;
+ flow_id = 0;
+
+ if (dpaa2_q == NULL) {
+ printf("Queue is not configured for node = %d\n", node->id);
+ return -1;
+ }
+
+ DPAA2_PMD_DEBUG("tc_id = %d, channel = %d\n\n", tc_id,
+ node->parent->channel_id);
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
+ ((node->parent->channel_id << 8) | tc_id),
+ flow_id, options, &tx_flow_cfg);
+ if (ret) {
+ printf("Error in setting the tx flow: "
+ "channel id = %d tc_id= %d, param = 0x%x "
+ "flow=%d err=%d\n", node->parent->channel_id, tc_id,
+ ((node->parent->channel_id << 8) | tc_id), flow_id,
+ ret);
+ return -1;
+ }
+
+ dpaa2_q->flow_id = flow_id;
+ dpaa2_q->tc_index = tc_id;
+
+ ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX, ((node->parent->channel_id << 8) | dpaa2_q->tc_index),
+ dpaa2_q->flow_id, &tx_flow_cfg, &qid);
+ if (ret) {
+ printf("Error in getting LFQID err=%d", ret);
+ return -1;
+ }
+ dpaa2_q->fqid = qid.fqid;
+
+ /* setting congestion notification */
+ if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
+ struct dpni_congestion_notification_cfg cong_notif_cfg = {0};
+
+ cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
+ cong_notif_cfg.threshold_entry = dpaa2_q->nb_desc;
+ /* Notify that the queue is not congested when the data in
+ * the queue is below this thershold.(90% of value)
+ */
+ cong_notif_cfg.threshold_exit = (dpaa2_q->nb_desc * 9) / 10;
+ cong_notif_cfg.message_ctx = 0;
+ cong_notif_cfg.message_iova =
+ (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
+ cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
+ cong_notif_cfg.notification_mode =
+ DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
+ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
+ DPNI_CONG_OPT_COHERENT_WRITE;
+ cong_notif_cfg.cg_point = DPNI_CP_QUEUE;
+
+ ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
+ priv->token,
+ DPNI_QUEUE_TX,
+ ((node->parent->channel_id << 8) | tc_id),
+ &cong_notif_cfg);
+ if (ret) {
+ printf("Error in setting tx congestion notification: "
+ "err=%d", ret);
+ return -ret;
+ }
+ }
+
+ return 0;
+}
+
+static void
+dpaa2_tm_sort_and_configure(struct rte_eth_dev *dev,
+ struct dpaa2_tm_node **nodes, int n)
+{
+ struct dpaa2_tm_node *temp_node;
+ int i;
+
+ if (n == 1) {
+ DPAA2_PMD_DEBUG("node id = %d\n, priority = %d, index = %d\n",
+ nodes[n - 1]->id, nodes[n - 1]->priority,
+ n - 1);
+ dpaa2_tm_configure_queue(dev, nodes[n - 1]);
+ return;
+ }
+
+ for (i = 0; i < n - 1; i++) {
+ if (nodes[i]->priority > nodes[i + 1]->priority) {
+ temp_node = nodes[i];
+ nodes[i] = nodes[i + 1];
+ nodes[i + 1] = temp_node;
+ }
+ }
+ dpaa2_tm_sort_and_configure(dev, nodes, n - 1);
+
+ DPAA2_PMD_DEBUG("node id = %d\n, priority = %d, index = %d\n",
+ nodes[n - 1]->id, nodes[n - 1]->priority,
+ n - 1);
+ dpaa2_tm_configure_queue(dev, nodes[n - 1]);
+}
+
static int
dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
struct rte_tm_error *error)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct dpaa2_tm_node *node, *temp_node;
+ struct dpaa2_tm_node *node;
+ struct dpaa2_tm_node *leaf_node, *temp_leaf_node, *channel_node;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
- int ret;
- int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
- struct dpni_tx_priorities_cfg prio_cfg;
+ int ret, t;
+
+ /* Populate TCs */
+ LIST_FOREACH(channel_node, &priv->nodes, next) {
+ struct dpaa2_tm_node *nodes[DPNI_MAX_TC];
+ int i = 0;
- memset(&prio_cfg, 0, sizeof(prio_cfg));
- memset(conf, 0, sizeof(conf));
+ if (channel_node->level_id != CHANNEL_LEVEL)
+ continue;
+
+ LIST_FOREACH(leaf_node, &priv->nodes, next) {
+ if (leaf_node->level_id == LNI_LEVEL ||
+ leaf_node->level_id == CHANNEL_LEVEL)
+ continue;
+ if (leaf_node->parent == channel_node) {
+ if (i >= DPNI_MAX_TC) {
+ ret = -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "More children than supported\n");
+ goto out;
+ }
+ nodes[i++] = leaf_node;
+ }
+ }
+ if (i > 0) {
+ DPAA2_PMD_DEBUG("Configure queues\n");
+ dpaa2_tm_sort_and_configure(dev, nodes, i);
+ }
+ }
+
+ /* Shaping */
LIST_FOREACH(node, &priv->nodes, next) {
- if (node->type == 0/*root node*/) {
+ if (node->type == NON_LEAF_NODE) {
if (!node->profile)
continue;
-
struct dpni_tx_shaping_cfg tx_cr_shaper, tx_er_shaper;
+ uint32_t param = 0;
tx_cr_shaper.max_burst_size =
node->profile->params.committed.size;
tx_cr_shaper.rate_limit =
- node->profile->params.committed.rate / (1024 * 1024);
+ node->profile->params.committed.rate /
+ (1024 * 1024);
tx_er_shaper.max_burst_size =
node->profile->params.peak.size;
tx_er_shaper.rate_limit =
node->profile->params.peak.rate / (1024 * 1024);
+ /* root node */
+ if (node->parent == NULL) {
+ DPAA2_PMD_DEBUG("LNI S.rate = %u, burst =%u\n",
+ tx_cr_shaper.rate_limit,
+ tx_cr_shaper.max_burst_size);
+ param = 0x2;
+ param |= node->profile->params.pkt_length_adjust << 16;
+ } else {
+ DPAA2_PMD_DEBUG("Channel = %d S.rate = %u\n",
+ node->channel_id,
+ tx_cr_shaper.rate_limit);
+ param = (node->channel_id << 8);
+ }
ret = dpni_set_tx_shaping(dpni, 0, priv->token,
- &tx_cr_shaper, &tx_er_shaper, 0);
+ &tx_cr_shaper, &tx_er_shaper, param);
if (ret) {
ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_SHAPER_PROFILE, NULL,
"Error in setting Shaping\n");
goto out;
}
+ continue;
+ }
+ }
+ LIST_FOREACH(channel_node, &priv->nodes, next) {
+ int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
+ struct dpni_tx_priorities_cfg prio_cfg;
+
+ memset(&prio_cfg, 0, sizeof(prio_cfg));
+ memset(conf, 0, sizeof(conf));
+
+ /* Process for each channel */
+ if (channel_node->level_id != CHANNEL_LEVEL)
continue;
- } else { /* level 1, all leaf nodes */
- if (node->id >= dev->data->nb_tx_queues) {
+
+ LIST_FOREACH(leaf_node, &priv->nodes, next) {
+ struct dpaa2_queue *leaf_dpaa2_q;
+ uint8_t leaf_tc_id;
+
+ if (leaf_node->level_id == LNI_LEVEL ||
+ leaf_node->level_id == CHANNEL_LEVEL)
+ continue;
+
+ /* level 2, all leaf nodes */
+ if (leaf_node->id >= dev->data->nb_tx_queues) {
ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID, NULL,
"Not enough txqs configured\n");
goto out;
}
- if (conf[node->id])
+ if (conf[leaf_node->id])
+ continue;
+
+ if (leaf_node->parent != channel_node)
continue;
- LIST_FOREACH(temp_node, &priv->nodes, next) {
- if (temp_node->id == node->id ||
- temp_node->type == 0)
+ leaf_dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[leaf_node->id];
+ leaf_tc_id = leaf_dpaa2_q->tc_index;
+ /* Process sibling leaf nodes */
+ LIST_FOREACH(temp_leaf_node, &priv->nodes, next) {
+ if (temp_leaf_node->id == leaf_node->id ||
+ temp_leaf_node->level_id == LNI_LEVEL ||
+ temp_leaf_node->level_id == CHANNEL_LEVEL)
+ continue;
+
+ if (temp_leaf_node->parent != channel_node)
continue;
- if (conf[temp_node->id])
+
+ if (conf[temp_leaf_node->id])
continue;
- if (node->priority == temp_node->priority) {
+
+ if (leaf_node->priority == temp_leaf_node->priority) {
+ struct dpaa2_queue *temp_leaf_dpaa2_q;
+ uint8_t temp_leaf_tc_id;
+
+ temp_leaf_dpaa2_q = (struct dpaa2_queue *)
+ dev->data->tx_queues[temp_leaf_node->id];
+ temp_leaf_tc_id = temp_leaf_dpaa2_q->tc_index;
if (wfq_grp == 0) {
- prio_cfg.tc_sched[temp_node->id].mode =
- DPNI_TX_SCHED_WEIGHTED_A;
- /* DPDK support lowest weight 1
- * and DPAA2 platform 100
- */
- prio_cfg.tc_sched[temp_node->id].delta_bandwidth =
- temp_node->weight + 99;
+ prio_cfg.tc_sched[temp_leaf_tc_id].mode =
+ DPNI_TX_SCHED_WEIGHTED_A;
+ /* DPAA2 support weight in multiple of 100 */
+ prio_cfg.tc_sched[temp_leaf_tc_id].delta_bandwidth =
+ temp_leaf_node->weight * 100;
} else if (wfq_grp == 1) {
- prio_cfg.tc_sched[temp_node->id].mode =
- DPNI_TX_SCHED_WEIGHTED_B;
- prio_cfg.tc_sched[temp_node->id].delta_bandwidth =
- temp_node->weight + 99;
+ prio_cfg.tc_sched[temp_leaf_tc_id].mode =
+ DPNI_TX_SCHED_WEIGHTED_B;
+ prio_cfg.tc_sched[temp_leaf_tc_id].delta_bandwidth =
+ temp_leaf_node->weight * 100;
} else {
- /*TODO: add one more check for
- * number of nodes in a group
- */
ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
"Only 2 WFQ Groups are supported\n");
goto out;
}
- conf[temp_node->id] = 1;
is_wfq_grp = 1;
+ conf[temp_leaf_node->id] = 1;
}
}
if (is_wfq_grp) {
if (wfq_grp == 0) {
- prio_cfg.tc_sched[node->id].mode =
- DPNI_TX_SCHED_WEIGHTED_A;
- prio_cfg.tc_sched[node->id].delta_bandwidth =
- node->weight + 99;
- prio_cfg.prio_group_A = node->priority;
+ prio_cfg.tc_sched[leaf_tc_id].mode =
+ DPNI_TX_SCHED_WEIGHTED_A;
+ prio_cfg.tc_sched[leaf_tc_id].delta_bandwidth =
+ leaf_node->weight * 100;
+ prio_cfg.prio_group_A = leaf_node->priority;
} else if (wfq_grp == 1) {
- prio_cfg.tc_sched[node->id].mode =
- DPNI_TX_SCHED_WEIGHTED_B;
- prio_cfg.tc_sched[node->id].delta_bandwidth =
- node->weight + 99;
- prio_cfg.prio_group_B = node->priority;
+ prio_cfg.tc_sched[leaf_tc_id].mode =
+ DPNI_TX_SCHED_WEIGHTED_B;
+ prio_cfg.tc_sched[leaf_tc_id].delta_bandwidth =
+ leaf_node->weight * 100;
+ prio_cfg.prio_group_B = leaf_node->priority;
}
wfq_grp++;
is_wfq_grp = 0;
}
- conf[node->id] = 1;
+ conf[leaf_node->id] = 1;
}
- if (wfq_grp)
+ if (wfq_grp > 1) {
prio_cfg.separate_groups = 1;
- }
- ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
- if (ret) {
- ret = -rte_tm_error_set(error, EINVAL,
+ if (prio_cfg.prio_group_B < prio_cfg.prio_group_A) {
+ prio_cfg.prio_group_A = 0;
+ prio_cfg.prio_group_B = 1;
+ } else {
+ prio_cfg.prio_group_A = 1;
+ prio_cfg.prio_group_B = 0;
+ }
+ }
+
+ prio_cfg.prio_group_A = 1;
+ prio_cfg.channel_idx = channel_node->channel_id;
+ ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
+ if (ret) {
+ ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
"Scheduling Failed\n");
- goto out;
+ goto out;
+ }
+ DPAA2_PMD_DEBUG("########################################\n");
+ DPAA2_PMD_DEBUG("Channel idx = %d\n", prio_cfg.channel_idx);
+ for (t = 0; t < DPNI_MAX_TC; t++) {
+ DPAA2_PMD_DEBUG("tc = %d mode = %d ", t, prio_cfg.tc_sched[t].mode);
+ DPAA2_PMD_DEBUG("delta = %d\n", prio_cfg.tc_sched[t].delta_bandwidth);
+ }
+ DPAA2_PMD_DEBUG("prioritya = %d\n", prio_cfg.prio_group_A);
+ DPAA2_PMD_DEBUG("priorityb = %d\n", prio_cfg.prio_group_B);
+ DPAA2_PMD_DEBUG("separate grps = %d\n\n", prio_cfg.separate_groups);
}
-
return 0;
out:
@@ -617,6 +892,81 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
return ret;
}
+static int
+dpaa2_node_stats_read(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_node_stats *stats, uint64_t *stats_mask,
+ int clear, struct rte_tm_error *error)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_tm_node *node;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ union dpni_statistics value;
+ int ret = 0;
+
+ node = dpaa2_node_from_id(priv, node_id);
+ if (!node)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id does not exist\n");
+
+ if (stats_mask)
+ *stats_mask = node->stats_mask;
+
+ if (!stats)
+ return 0;
+
+ memset(stats, 0, sizeof(*stats));
+ memset(&value, 0, sizeof(union dpni_statistics));
+
+ if (node->level_id == LNI_LEVEL) {
+ uint8_t page1 = 1;
+
+ ret = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ page1, 0, &value);
+ if (ret)
+ return -rte_tm_error_set(error, -ret,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to read port statistics\n");
+
+ if (node->stats_mask & RTE_TM_STATS_N_PKTS)
+ stats->n_pkts = value.page_1.egress_all_frames;
+
+ if (node->stats_mask & RTE_TM_STATS_N_BYTES)
+ stats->n_bytes = value.page_1.egress_all_bytes;
+
+ if (clear) {
+ ret = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
+ return -rte_tm_error_set(error, -ret,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to reset port statistics\n");
+ }
+ } else if (node->level_id == QUEUE_LEVEL) {
+ uint8_t page3 = 3;
+ struct dpaa2_queue *dpaa2_q;
+ dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id];
+
+ ret = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ page3,
+ (node->parent->channel_id << 8 |
+ dpaa2_q->tc_index), &value);
+ if (ret)
+ return -rte_tm_error_set(error, -ret,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to read queue statistics\n");
+
+ if (node->stats_mask & RTE_TM_STATS_N_PKTS)
+ stats->n_pkts = value.page_3.ceetm_dequeue_frames;
+ if (node->stats_mask & RTE_TM_STATS_N_BYTES)
+ stats->n_bytes = value.page_3.ceetm_dequeue_bytes;
+ } else {
+ return -rte_tm_error_set(error, -1,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to read channel statistics\n");
+ }
+
+ return 0;
+}
+
const struct rte_tm_ops dpaa2_tm_ops = {
.node_type_get = dpaa2_node_type_get,
.capabilities_get = dpaa2_capabilities_get,
@@ -627,4 +977,5 @@ const struct rte_tm_ops dpaa2_tm_ops = {
.node_add = dpaa2_node_add,
.node_delete = dpaa2_node_delete,
.hierarchy_commit = dpaa2_hierarchy_commit,
+ .node_stats_read = dpaa2_node_stats_read,
};
diff --git a/drivers/net/dpaa2/dpaa2_tm.h b/drivers/net/dpaa2/dpaa2_tm.h
index 6632fab687..cfbb437322 100644
--- a/drivers/net/dpaa2/dpaa2_tm.h
+++ b/drivers/net/dpaa2/dpaa2_tm.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2020 NXP
+ * Copyright 2020-2021 NXP
*/
#ifndef _DPAA2_TM_H_
@@ -7,6 +7,18 @@
#include <rte_tm.h>
+enum node_type {
+ NON_LEAF_NODE = 0,
+ LEAF_NODE
+};
+
+enum level_type {
+ LNI_LEVEL = 0,
+ CHANNEL_LEVEL,
+ QUEUE_LEVEL,
+ MAX_LEVEL
+};
+
struct dpaa2_tm_shaper_profile {
LIST_ENTRY(dpaa2_tm_shaper_profile) next;
uint32_t id;
@@ -18,6 +30,9 @@ struct dpaa2_tm_node {
LIST_ENTRY(dpaa2_tm_node) next;
uint32_t id;
uint32_t type;
+ uint32_t level_id;
+ uint16_t channel_id; /* Only for level 1 nodes */
+ uint16_t tc_id; /* Only for level 1 nodes */
int refcnt;
struct dpaa2_tm_node *parent;
struct dpaa2_tm_shaper_profile *profile;
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index cf78295d90..b7a65cb637 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -916,6 +916,44 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_link_cfg() - return the link configuration configured by
+ * dpni_set_link_cfg().
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Link configuration from dpni object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_link_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_link_cfg *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_CFG,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
+ cfg->advertising = le64_to_cpu(rsp_params->advertising);
+ cfg->options = le64_to_cpu(rsp_params->options);
+ cfg->rate = le32_to_cpu(rsp_params->rate);
+
+ return err;
+}
+
/**
* dpni_get_link_state() - Return the link state (either up or down)
* @mc_io: Pointer to MC portal's I/O object
@@ -1678,6 +1716,38 @@ int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_tx_confirmation_mode() - Get Tx confirmation mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mode: Tx confirmation mode
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_confirmation_mode *mode)
+{
+ struct dpni_tx_confirmation_mode *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONFIRMATION_MODE,
+ cmd_flags,
+ token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpni_tx_confirmation_mode *)cmd.params;
+ *mode = rsp_params->confirmation_mode;
+
+ return 0;
+}
+
/**
* dpni_set_qos_table() - Set QoS mapping table
* @mc_io: Pointer to MC portal's I/O object
@@ -2733,6 +2803,122 @@ int dpni_get_opr(struct fsl_mc_io *mc_io,
return 0;
}
+int dpni_load_sw_sequence(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_load_ss_cfg *cfg)
+{
+ struct dpni_load_sw_sequence *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_LOAD_SW_SEQUENCE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_load_sw_sequence *)cmd.params;
+ cmd_params->dest = cfg->dest;
+ cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
+ cmd_params->ss_size = cpu_to_le16(cfg->ss_size);
+ cmd_params->ss_iova = cpu_to_le64(cfg->ss_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_enable_sw_sequence(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_enable_ss_cfg *cfg)
+{
+ struct dpni_enable_sw_sequence *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_SW_SEQUENCE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_enable_sw_sequence *)cmd.params;
+ cmd_params->dest = cfg->dest;
+ cmd_params->set_start = cfg->set_start;
+ cmd_params->hxs = cpu_to_le16(cfg->hxs);
+ cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
+ cmd_params->param_offset = cfg->param_offset;
+ cmd_params->param_size = cfg->param_size;
+ cmd_params->param_iova = cpu_to_le64(cfg->param_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_sw_sequence_layout() - Get the soft sequence layout
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @src: Source of the layout (WRIOP Rx or Tx)
+ * @ss_layout_iova: I/O virtual address of 264 bytes DMA-able memory
+ *
+ * warning: After calling this function, call dpni_extract_sw_sequence_layout()
+ * to get the layout.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_get_sw_sequence_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_soft_sequence_dest src,
+ uint64_t ss_layout_iova)
+{
+ struct dpni_get_sw_sequence_layout *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT,
+ cmd_flags,
+ token);
+
+ cmd_params = (struct dpni_get_sw_sequence_layout *)cmd.params;
+ cmd_params->src = src;
+ cmd_params->layout_iova = cpu_to_le64(ss_layout_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_extract_sw_sequence_layout() - extract the software sequence layout
+ * @layout: software sequence layout
+ * @sw_sequence_layout_buf: Zeroed 264 bytes of memory before mapping it
+ * to DMA
+ *
+ * This function has to be called after dpni_get_sw_sequence_layout
+ *
+ */
+void dpni_extract_sw_sequence_layout(struct dpni_sw_sequence_layout *layout,
+ const uint8_t *sw_sequence_layout_buf)
+{
+ const struct dpni_sw_sequence_layout_entry *ext_params;
+ int i;
+ uint16_t ss_size, ss_offset;
+
+ ext_params = (const struct dpni_sw_sequence_layout_entry *)
+ sw_sequence_layout_buf;
+
+ for (i = 0; i < DPNI_SW_SEQUENCE_LAYOUT_SIZE; i++) {
+ ss_offset = le16_to_cpu(ext_params[i].ss_offset);
+ ss_size = le16_to_cpu(ext_params[i].ss_size);
+
+ if (ss_offset == 0 && ss_size == 0) {
+ layout->num_ss = i;
+ return;
+ }
+
+ layout->ss[i].ss_offset = ss_offset;
+ layout->ss[i].ss_size = ss_size;
+ layout->ss[i].param_offset = ext_params[i].param_offset;
+ layout->ss[i].param_size = ext_params[i].param_size;
+ }
+}
/**
* dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
* @mc_io: Pointer to MC portal's I/O object
@@ -2901,119 +3087,3 @@ int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
return err;
}
-int dpni_load_sw_sequence(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpni_load_ss_cfg *cfg)
-{
- struct dpni_load_sw_sequence *cmd_params;
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPNI_CMDID_LOAD_SW_SEQUENCE,
- cmd_flags,
- token);
- cmd_params = (struct dpni_load_sw_sequence *)cmd.params;
- cmd_params->dest = cfg->dest;
- cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
- cmd_params->ss_size = cpu_to_le16(cfg->ss_size);
- cmd_params->ss_iova = cpu_to_le64(cfg->ss_iova);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-int dpni_enable_sw_sequence(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpni_enable_ss_cfg *cfg)
-{
- struct dpni_enable_sw_sequence *cmd_params;
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_SW_SEQUENCE,
- cmd_flags,
- token);
- cmd_params = (struct dpni_enable_sw_sequence *)cmd.params;
- cmd_params->dest = cfg->dest;
- cmd_params->set_start = cfg->set_start;
- cmd_params->hxs = cpu_to_le16(cfg->hxs);
- cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
- cmd_params->param_offset = cfg->param_offset;
- cmd_params->param_size = cfg->param_size;
- cmd_params->param_iova = cpu_to_le64(cfg->param_iova);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpni_get_sw_sequence_layout() - Get the soft sequence layout
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @src: Source of the layout (WRIOP Rx or Tx)
- * @ss_layout_iova: I/O virtual address of 264 bytes DMA-able memory
- *
- * warning: After calling this function, call dpni_extract_sw_sequence_layout()
- * to get the layout.
- *
- * Return: '0' on Success; error code otherwise.
- */
-int dpni_get_sw_sequence_layout(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- enum dpni_soft_sequence_dest src,
- uint64_t ss_layout_iova)
-{
- struct dpni_get_sw_sequence_layout *cmd_params;
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT,
- cmd_flags,
- token);
-
- cmd_params = (struct dpni_get_sw_sequence_layout *)cmd.params;
- cmd_params->src = src;
- cmd_params->layout_iova = cpu_to_le64(ss_layout_iova);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpni_extract_sw_sequence_layout() - extract the software sequence layout
- * @layout: software sequence layout
- * @sw_sequence_layout_buf: Zeroed 264 bytes of memory before mapping it
- * to DMA
- *
- * This function has to be called after dpni_get_sw_sequence_layout
- *
- */
-void dpni_extract_sw_sequence_layout(struct dpni_sw_sequence_layout *layout,
- const uint8_t *sw_sequence_layout_buf)
-{
- const struct dpni_sw_sequence_layout_entry *ext_params;
- int i;
- uint16_t ss_size, ss_offset;
-
- ext_params = (const struct dpni_sw_sequence_layout_entry *)
- sw_sequence_layout_buf;
-
- for (i = 0; i < DPNI_SW_SEQUENCE_LAYOUT_SIZE; i++) {
- ss_offset = le16_to_cpu(ext_params[i].ss_offset);
- ss_size = le16_to_cpu(ext_params[i].ss_size);
-
- if (ss_offset == 0 && ss_size == 0) {
- layout->num_ss = i;
- return;
- }
-
- layout->ss[i].ss_offset = ss_offset;
- layout->ss[i].ss_size = ss_size;
- layout->ss[i].param_offset = ext_params[i].param_offset;
- layout->ss[i].param_size = ext_params[i].param_size;
- }
-}
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
index 8aead28261..c7df727fef 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -761,6 +761,11 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
uint16_t token,
const struct dpni_link_cfg *cfg);
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_link_cfg *cfg);
+
/**
* struct dpni_link_state - Structure representing DPNI link state
* @rate: Rate
@@ -1709,63 +1714,6 @@ int dpni_get_opr(struct fsl_mc_io *mc_io,
uint8_t flags,
uint8_t opr_id);
-/**
- * When used for queue_idx in function dpni_set_rx_dist_default_queue will
- * signal to dpni to drop all unclassified frames
- */
-#define DPNI_FS_MISS_DROP ((uint16_t)-1)
-
-/**
- * struct dpni_rx_dist_cfg - distribution configuration
- * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
- * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
- * 512,768,896,1024
- * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
- * the extractions to be used for the distribution key by calling
- * dpkg_prepare_key_cfg() relevant only when enable!=0 otherwise
- * it can be '0'
- * @enable: enable/disable the distribution.
- * @tc: TC id for which distribution is set
- * @fs_miss_flow_id: when packet misses all rules from flow steering table and
- * hash is disabled it will be put into this queue id; use
- * DPNI_FS_MISS_DROP to drop frames. The value of this field is
- * used only when flow steering distribution is enabled and hash
- * distribution is disabled
- */
-struct dpni_rx_dist_cfg {
- uint16_t dist_size;
- uint64_t key_cfg_iova;
- uint8_t enable;
- uint8_t tc;
- uint16_t fs_miss_flow_id;
-};
-
-int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
- uint16_t token, const struct dpni_rx_dist_cfg *cfg);
-
-int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
- uint16_t token, const struct dpni_rx_dist_cfg *cfg);
-
-int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
- uint16_t token, uint16_t tpid);
-
-int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
- uint16_t token, uint16_t tpid);
-
-/**
- * struct dpni_custom_tpid_cfg - custom TPID configuration. Contains custom TPID
- * values used in current dpni object to detect 802.1q frames.
- * @tpid1: first tag. Not used if zero.
- * @tpid2: second tag. Not used if zero.
- */
-struct dpni_custom_tpid_cfg {
- uint16_t tpid1;
- uint16_t tpid2;
-};
-
-int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
- uint16_t token, struct dpni_custom_tpid_cfg *tpid);
-
/**
* enum dpni_soft_sequence_dest - Enumeration of WRIOP software sequence
* destinations
@@ -1936,4 +1884,61 @@ int dpni_set_port_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
int dpni_get_port_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
uint16_t token, struct dpni_port_cfg *port_cfg);
+/**
+ * When used for queue_idx in function dpni_set_rx_dist_default_queue will
+ * signal to dpni to drop all unclassified frames
+ */
+#define DPNI_FS_MISS_DROP ((uint16_t)-1)
+
+/**
+ * struct dpni_rx_dist_cfg - distribution configuration
+ * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
+ * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
+ * 512,768,896,1024
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * the extractions to be used for the distribution key by calling
+ * dpkg_prepare_key_cfg() relevant only when enable!=0 otherwise
+ * it can be '0'
+ * @enable: enable/disable the distribution.
+ * @tc: TC id for which distribution is set
+ * @fs_miss_flow_id: when packet misses all rules from flow steering table and
+ * hash is disabled it will be put into this queue id; use
+ * DPNI_FS_MISS_DROP to drop frames. The value of this field is
+ * used only when flow steering distribution is enabled and hash
+ * distribution is disabled
+ */
+struct dpni_rx_dist_cfg {
+ uint16_t dist_size;
+ uint64_t key_cfg_iova;
+ uint8_t enable;
+ uint8_t tc;
+ uint16_t fs_miss_flow_id;
+};
+
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, uint16_t tpid);
+
+int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, uint16_t tpid);
+
+/**
+ * struct dpni_custom_tpid_cfg - custom TPID configuration. Contains custom TPID
+ * values used in current dpni object to detect 802.1q frames.
+ * @tpid1: first tag. Not used if zero.
+ * @tpid2: second tag. Not used if zero.
+ */
+struct dpni_custom_tpid_cfg {
+ uint16_t tpid1;
+ uint16_t tpid2;
+};
+
+int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, struct dpni_custom_tpid_cfg *tpid);
+
#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index 8bff2ec9af..ed0bd7615a 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -108,16 +108,17 @@
#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE DPNI_CMD(0x26D)
+#define DPNI_CMDID_SET_OPR DPNI_CMD(0x26e)
+#define DPNI_CMDID_GET_OPR DPNI_CMD(0x26f)
#define DPNI_CMDID_LOAD_SW_SEQUENCE DPNI_CMD(0x270)
#define DPNI_CMDID_ENABLE_SW_SEQUENCE DPNI_CMD(0x271)
#define DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT DPNI_CMD(0x272)
-#define DPNI_CMDID_SET_OPR DPNI_CMD(0x26e)
-#define DPNI_CMDID_GET_OPR DPNI_CMD(0x26f)
#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
#define DPNI_CMDID_ADD_CUSTOM_TPID DPNI_CMD(0x275)
#define DPNI_CMDID_REMOVE_CUSTOM_TPID DPNI_CMD(0x276)
#define DPNI_CMDID_GET_CUSTOM_TPID DPNI_CMD(0x277)
+#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
@@ -451,8 +452,6 @@ struct dpni_cmd_enable_vlan_filter {
uint8_t en;
};
-#define DPNI_VLAN_SET_QUEUE_ACTION 1
-
struct dpni_cmd_vlan_id {
uint8_t flags;
uint8_t tc_id;
@@ -854,42 +853,6 @@ struct dpni_rsp_get_opr {
uint16_t opr_id;
};
-struct dpni_cmd_add_custom_tpid {
- uint16_t pad;
- uint16_t tpid;
-};
-
-struct dpni_cmd_remove_custom_tpid {
- uint16_t pad;
- uint16_t tpid;
-};
-
-struct dpni_rsp_get_custom_tpid {
- uint16_t tpid1;
- uint16_t tpid2;
-};
-
-#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
-#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
-struct dpni_cmd_set_rx_fs_dist {
- uint16_t dist_size;
- uint8_t enable;
- uint8_t tc;
- uint16_t miss_flow_id;
- uint16_t pad1;
- uint64_t key_cfg_iova;
-};
-
-#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
-#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
-struct dpni_cmd_set_rx_hash_dist {
- uint16_t dist_size;
- uint8_t enable;
- uint8_t tc_id;
- uint32_t pad;
- uint64_t key_cfg_iova;
-};
-
struct dpni_load_sw_sequence {
uint8_t dest;
uint8_t pad0[7];
@@ -957,5 +920,41 @@ struct dpni_rsp_get_port_cfg {
uint32_t bit_params;
};
+#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_fs_dist {
+ uint16_t dist_size;
+ uint8_t enable;
+ uint8_t tc;
+ uint16_t miss_flow_id;
+ uint16_t pad1;
+ uint64_t key_cfg_iova;
+};
+
+#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_hash_dist {
+ uint16_t dist_size;
+ uint8_t enable;
+ uint8_t tc_id;
+ uint32_t pad;
+ uint64_t key_cfg_iova;
+};
+
+struct dpni_cmd_add_custom_tpid {
+ uint16_t pad;
+ uint16_t tpid;
+};
+
+struct dpni_cmd_remove_custom_tpid {
+ uint16_t pad;
+ uint16_t tpid;
+};
+
+struct dpni_rsp_get_custom_tpid {
+ uint16_t tpid1;
+ uint16_t tpid2;
+};
+
#pragma pack(pop)
#endif /* _FSL_DPNI_CMD_H */
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 08/17] net/dpaa2: secondary process handling for dpni
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (6 preceding siblings ...)
2021-12-06 12:18 ` [PATCH 07/17] net/dpaa2: add support for level 2 in traffic management nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 09/17] bus/fslmc: add and scan dprc devices nipun.gupta
` (10 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Jun Yang
From: Jun Yang <jun.yang@nxp.com>
This change uses 'dev->process_private' instead of 'priv->hw'
to get dpmcp per process while setting flow distribution,
as priv->hw is only valid for primary process.
It also initializes rte_dpaa2_bpid_info in the secondary process.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 23 +++++++++++++++++++++++
drivers/mempool/dpaa2/rte_dpaa2_mempool.h | 15 +++++++++++++++
drivers/mempool/dpaa2/version.map | 1 +
drivers/net/dpaa2/base/dpaa2_hw_dpni.c | 5 ++---
drivers/net/dpaa2/dpaa2_ethdev.c | 10 ++++++++--
drivers/net/dpaa2/dpaa2_ethdev.h | 3 ++-
6 files changed, 51 insertions(+), 6 deletions(-)
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 39c6252a63..56c629c681 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -263,6 +263,29 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
}
}
+int rte_dpaa2_bpid_info_init(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_info *bp_info = mempool_to_bpinfo(mp);
+ uint32_t bpid = bp_info->bpid;
+
+ if (!rte_dpaa2_bpid_info) {
+ rte_dpaa2_bpid_info = (struct dpaa2_bp_info *)rte_malloc(NULL,
+ sizeof(struct dpaa2_bp_info) * MAX_BPID,
+ RTE_CACHE_LINE_SIZE);
+ if (rte_dpaa2_bpid_info == NULL)
+ return -ENOMEM;
+ memset(rte_dpaa2_bpid_info, 0,
+ sizeof(struct dpaa2_bp_info) * MAX_BPID);
+ }
+
+ rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
+ + rte_pktmbuf_priv_size(mp);
+ rte_dpaa2_bpid_info[bpid].bp_list = bp_info->bp_list;
+ rte_dpaa2_bpid_info[bpid].bpid = bpid;
+
+ return 0;
+}
+
uint16_t
rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
{
diff --git a/drivers/mempool/dpaa2/rte_dpaa2_mempool.h b/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
index 4a22b7c42e..28dea74326 100644
--- a/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
+++ b/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
@@ -46,6 +46,21 @@ rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp);
struct rte_mbuf *
rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr);
+/**
+ * Initialize the rte_dpaa2_bpid_info
+ * In generial, it is called in the secondary process and
+ * mp has been created in the primary process.
+ *
+ * @param mp
+ * memory pool
+ *
+ * @return
+ * - 0 on success.
+ * - (<0) on failure.
+ */
+__rte_internal
+int rte_dpaa2_bpid_info_init(struct rte_mempool *mp);
+
#ifdef __cplusplus
}
#endif
diff --git a/drivers/mempool/dpaa2/version.map b/drivers/mempool/dpaa2/version.map
index 49c460ec54..cfd4ae617a 100644
--- a/drivers/mempool/dpaa2/version.map
+++ b/drivers/mempool/dpaa2/version.map
@@ -11,5 +11,6 @@ INTERNAL {
global:
rte_dpaa2_bpid_info;
+ rte_dpaa2_bpid_info_init;
rte_dpaa2_mbuf_alloc_bulk;
};
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index 3170694841..9509f6e8a3 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -95,7 +95,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
uint64_t req_dist_set, int tc_index)
{
struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
- struct fsl_mc_io *dpni = priv->hw;
+ struct fsl_mc_io *dpni = eth_dev->process_private;
struct dpni_rx_dist_cfg tc_cfg;
struct dpkg_profile_cfg kg_cfg;
void *p_params;
@@ -457,13 +457,12 @@ dpaa2_distset_to_dpkg_profile_cfg(
int
dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
- void *blist)
+ struct fsl_mc_io *dpni, void *blist)
{
/* Function to attach a DPNI with a buffer pool list. Buffer pool list
* handle is passed in blist.
*/
int32_t retcode;
- struct fsl_mc_io *dpni = priv->hw;
struct dpni_pools_cfg bpool_cfg;
struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
struct dpni_buffer_layout layout;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index b91e773605..a45beed75f 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -18,6 +18,7 @@
#include <rte_dev.h>
#include <rte_fslmc.h>
#include <rte_flow_driver.h>
+#include "rte_dpaa2_mempool.h"
#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
@@ -712,9 +713,14 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ ret = rte_dpaa2_bpid_info_init(mb_pool);
+ if (ret)
+ return ret;
+ }
bpid = mempool_to_bpid(mb_pool);
- ret = dpaa2_attach_bp_list(priv,
- rte_dpaa2_bpid_info[bpid].bp_list);
+ ret = dpaa2_attach_bp_list(priv, dpni,
+ rte_dpaa2_bpid_info[bpid].bp_list);
if (ret)
return ret;
}
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 1fc2fc367e..bd33a22a8e 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -208,7 +208,8 @@ int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
uint8_t tc_index);
-int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);
+int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
+ struct fsl_mc_io *dpni, void *blist);
__rte_internal
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 09/17] bus/fslmc: add and scan dprc devices
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (7 preceding siblings ...)
2021-12-06 12:18 ` [PATCH 08/17] net/dpaa2: secondary process handling for dpni nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 10/17] net/dpaa2: support recycle loopback port nipun.gupta
` (9 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Jun Yang, Nipun Gupta
From: Jun Yang <jun.yang@nxp.com>
In order to get the connection endpoint of each object,
scan the dprc object.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/bus/fslmc/fslmc_bus.c | 15 ++-
drivers/bus/fslmc/fslmc_vfio.c | 18 +++-
drivers/bus/fslmc/mc/dprc.c | 129 +++++++++++++++++++++++
drivers/bus/fslmc/mc/fsl_dprc.h | 46 ++++++++
drivers/bus/fslmc/mc/fsl_dprc_cmd.h | 48 +++++++++
drivers/bus/fslmc/meson.build | 4 +-
drivers/bus/fslmc/portal/dpaa2_hw_dprc.c | 100 ++++++++++++++++++
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 12 +++
drivers/bus/fslmc/rte_fslmc.h | 10 +-
9 files changed, 374 insertions(+), 8 deletions(-)
create mode 100644 drivers/bus/fslmc/mc/dprc.c
create mode 100644 drivers/bus/fslmc/mc/fsl_dprc.h
create mode 100644 drivers/bus/fslmc/mc/fsl_dprc_cmd.h
create mode 100644 drivers/bus/fslmc/portal/dpaa2_hw_dprc.c
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index a0ef24cdc8..a3c0d838c4 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2016,2018-2019 NXP
+ * Copyright 2016,2018-2021 NXP
*
*/
@@ -136,10 +136,6 @@ scan_one_fslmc_device(char *dev_name)
if (!dev_name)
return ret;
- /* Ignore the Container name itself */
- if (!strncmp("dprc", dev_name, 4))
- return 0;
-
/* Creating a temporary copy to perform cut-parse over string */
dup_dev_name = strdup(dev_name);
if (!dup_dev_name) {
@@ -197,6 +193,8 @@ scan_one_fslmc_device(char *dev_name)
dev->dev_type = DPAA2_MUX;
else if (!strncmp("dprtc", t_ptr, 5))
dev->dev_type = DPAA2_DPRTC;
+ else if (!strncmp("dprc", t_ptr, 4))
+ dev->dev_type = DPAA2_DPRC;
else
dev->dev_type = DPAA2_UNKNOWN;
@@ -339,6 +337,13 @@ rte_fslmc_scan(void)
goto scan_fail;
}
+ /* Scan the DPRC container object */
+ ret = scan_one_fslmc_device(fslmc_container);
+ if (ret != 0) {
+ /* Error in parsing directory - exit gracefully */
+ goto scan_fail_cleanup;
+ }
+
while ((entry = readdir(dir)) != NULL) {
if (entry->d_name[0] == '.' || entry->d_type != DT_DIR)
continue;
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index b4704eeae4..1b89a56bbc 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2021 NXP
*
*/
@@ -728,6 +728,7 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev)
case DPAA2_BPOOL:
case DPAA2_DPRTC:
case DPAA2_MUX:
+ case DPAA2_DPRC:
TAILQ_FOREACH(object, &dpaa2_obj_list, next) {
if (dev->dev_type == object->dev_type)
object->create(dev_fd, &device_info,
@@ -881,6 +882,21 @@ fslmc_vfio_process_group(void)
return -1;
}
+ /* Search for DPRC device next as it updates endpoint of
+ * other devices.
+ */
+ current_device = 0;
+ RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
+ if (dev->dev_type == DPAA2_DPRC) {
+ ret = fslmc_process_iodevices(dev);
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to process dprc");
+ return -1;
+ }
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ }
+ }
+
current_device = 0;
RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next,
dev_temp) {
diff --git a/drivers/bus/fslmc/mc/dprc.c b/drivers/bus/fslmc/mc/dprc.c
new file mode 100644
index 0000000000..491081c7c8
--- /dev/null
+++ b/drivers/bus/fslmc/mc/dprc.c
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2021 NXP
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dprc.h>
+#include <fsl_dprc_cmd.h>
+
+/** @addtogroup dprc
+ * @{
+ */
+
+/**
+ * dprc_open() - Open DPRC object for use
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @container_id: Container ID to open
+ * @token: Returned token of DPRC object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Required before any operation on the object.
+ */
+int dprc_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int container_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
+ 0);
+ cmd_params = (struct dprc_cmd_open *)cmd.params;
+ cmd_params->container_id = cpu_to_le32(container_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dprc_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_get_connection() - Get connected endpoint and link status if connection
+ * exists.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @endpoint1: Endpoint 1 configuration parameters
+ * @endpoint2: Returned endpoint 2 configuration parameters
+ * @state: Returned link state:
+ * 1 - link is up;
+ * 0 - link is down;
+ * -1 - no connection (endpoint2 information is irrelevant)
+ *
+ * Return: '0' on Success; -ENAVAIL if connection does not exist.
+ */
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_connection *cmd_params;
+ struct dprc_rsp_get_connection *rsp_params;
+ int err, i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+ cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+ cmd_params->ep1_interface_id = cpu_to_le16(endpoint1->if_id);
+ for (i = 0; i < 16; i++)
+ cmd_params->ep1_type[i] = endpoint1->type[i];
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+ endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+ endpoint2->if_id = le16_to_cpu(rsp_params->ep2_interface_id);
+ *state = le32_to_cpu(rsp_params->state);
+ for (i = 0; i < 16; i++)
+ endpoint2->type[i] = rsp_params->ep2_type[i];
+
+ return 0;
+}
diff --git a/drivers/bus/fslmc/mc/fsl_dprc.h b/drivers/bus/fslmc/mc/fsl_dprc.h
new file mode 100644
index 0000000000..177210c2d4
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dprc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2021 NXP
+ *
+ */
+#ifndef _FSL_DPRC_H
+#define _FSL_DPRC_H
+
+/** @addtogroup dprc Data Path Resource Container API
+ * Contains DPRC API for managing and querying DPAA resources
+ * @{
+ */
+
+struct fsl_mc_io;
+
+int dprc_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int container_id,
+ uint16_t *token);
+
+int dprc_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dprc_endpoint - Endpoint description for link connect/disconnect
+ * operations
+ * @type: Endpoint object type: NULL terminated string
+ * @id: Endpoint object ID
+ * @if_id: Interface ID; should be set for endpoints with multiple
+ * interfaces ("dpsw", "dpdmux"); for others, always set to 0
+ */
+struct dprc_endpoint {
+ char type[16];
+ int id;
+ uint16_t if_id;
+};
+
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state);
+#endif /* _FSL_DPRC_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dprc_cmd.h b/drivers/bus/fslmc/mc/fsl_dprc_cmd.h
new file mode 100644
index 0000000000..6efa5634d2
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dprc_cmd.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2021 NXP
+ *
+ */
+
+#ifndef _FSL_DPRC_CMD_H
+#define _FSL_DPRC_CMD_H
+
+/* Minimal supported DPRC Version */
+#define DPRC_VER_MAJOR 6
+#define DPRC_VER_MINOR 6
+
+/* Command versioning */
+#define DPRC_CMD_BASE_VERSION 1
+#define DPRC_CMD_ID_OFFSET 4
+
+#define DPRC_CMD(id) ((id << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
+#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
+#define DPRC_CMDID_GET_CONNECTION DPRC_CMD(0x16C)
+
+#pragma pack(push, 1)
+struct dprc_cmd_open {
+ uint32_t container_id;
+};
+
+struct dprc_cmd_get_connection {
+ uint32_t ep1_id;
+ uint16_t ep1_interface_id;
+ uint16_t pad;
+
+ uint8_t ep1_type[16];
+};
+
+struct dprc_rsp_get_connection {
+ uint64_t pad[3];
+ uint32_t ep2_id;
+ uint16_t ep2_interface_id;
+ uint16_t pad1;
+ uint8_t ep2_type[16];
+ uint32_t state;
+};
+#pragma pack(pop)
+#endif /* _FSL_DPRC_CMD_H */
diff --git a/drivers/bus/fslmc/meson.build b/drivers/bus/fslmc/meson.build
index 54be76f516..162ca286fe 100644
--- a/drivers/bus/fslmc/meson.build
+++ b/drivers/bus/fslmc/meson.build
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2018 NXP
+# Copyright 2018,2021 NXP
if not is_linux
build = false
@@ -16,10 +16,12 @@ sources = files(
'mc/dpdmai.c',
'mc/dpio.c',
'mc/dpmng.c',
+ 'mc/dprc.c',
'mc/mc_sys.c',
'portal/dpaa2_hw_dpbp.c',
'portal/dpaa2_hw_dpci.c',
'portal/dpaa2_hw_dpio.c',
+ 'portal/dpaa2_hw_dprc.c',
'qbman/qbman_portal.c',
'qbman/qbman_debug.c',
)
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dprc.c b/drivers/bus/fslmc/portal/dpaa2_hw_dprc.c
new file mode 100644
index 0000000000..ca1d0304d5
--- /dev/null
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dprc.c
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2021 NXP
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+#include <rte_dev.h>
+
+#include <fslmc_logs.h>
+#include <rte_fslmc.h>
+#include <mc/fsl_dprc.h>
+#include "portal/dpaa2_hw_pvt.h"
+
+TAILQ_HEAD(dprc_dev_list, dpaa2_dprc_dev);
+static struct dprc_dev_list dprc_dev_list
+ = TAILQ_HEAD_INITIALIZER(dprc_dev_list); /*!< DPRC device list */
+
+static int
+rte_dpaa2_create_dprc_device(int vdev_fd __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dprc_id)
+{
+ struct dpaa2_dprc_dev *dprc_node;
+ struct dprc_endpoint endpoint1, endpoint2;
+ struct rte_dpaa2_device *dev, *dev_tmp;
+ int ret;
+
+ /* Allocate DPAA2 dprc handle */
+ dprc_node = rte_malloc(NULL, sizeof(struct dpaa2_dprc_dev), 0);
+ if (!dprc_node) {
+ DPAA2_BUS_ERR("Memory allocation failed for DPRC Device");
+ return -ENOMEM;
+ }
+
+ /* Open the dprc object */
+ dprc_node->dprc.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+ dprc_node->dprc_id = dprc_id;
+ ret = dprc_open(&dprc_node->dprc,
+ CMD_PRI_LOW, dprc_id, &dprc_node->token);
+ if (ret) {
+ DPAA2_BUS_ERR("Resource alloc failure with err code: %d", ret);
+ rte_free(dprc_node);
+ return ret;
+ }
+
+ RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_tmp) {
+ if (dev->dev_type == DPAA2_ETH) {
+ int link_state;
+
+ memset(&endpoint1, 0, sizeof(struct dprc_endpoint));
+ memset(&endpoint2, 0, sizeof(struct dprc_endpoint));
+ strcpy(endpoint1.type, "dpni");
+ endpoint1.id = dev->object_id;
+ ret = dprc_get_connection(&dprc_node->dprc,
+ CMD_PRI_LOW,
+ dprc_node->token,
+ &endpoint1, &endpoint2,
+ &link_state);
+ if (ret) {
+ DPAA2_BUS_ERR("dpni.%d connection failed!",
+ dev->object_id);
+ dprc_close(&dprc_node->dprc, CMD_PRI_LOW,
+ dprc_node->token);
+ rte_free(dprc_node);
+ return ret;
+ }
+
+ if (!strcmp(endpoint2.type, "dpmac"))
+ dev->ep_dev_type = DPAA2_MAC;
+ else if (!strcmp(endpoint2.type, "dpni"))
+ dev->ep_dev_type = DPAA2_ETH;
+ else if (!strcmp(endpoint2.type, "dpdmux"))
+ dev->ep_dev_type = DPAA2_MUX;
+ else
+ dev->ep_dev_type = DPAA2_UNKNOWN;
+
+ dev->ep_object_id = endpoint2.id;
+ } else {
+ dev->ep_dev_type = DPAA2_UNKNOWN;
+ }
+ sprintf(dev->ep_name, "%s.%d", endpoint2.type, endpoint2.id);
+ }
+
+ TAILQ_INSERT_TAIL(&dprc_dev_list, dprc_node, next);
+
+ return 0;
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dprc_obj = {
+ .dev_type = DPAA2_DPRC,
+ .create = rte_dpaa2_create_dprc_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dprc, rte_dpaa2_dprc_obj);
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index b1bba1ac36..8cb4d404aa 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -187,6 +187,18 @@ struct swp_active_dqs {
extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
+/**
+ * A structure describing a DPAA2 container.
+ */
+struct dpaa2_dprc_dev {
+ TAILQ_ENTRY(dpaa2_dprc_dev) next;
+ /**< Pointer to Next device instance */
+ const char *name;
+ struct fsl_mc_io dprc; /** handle to DPRC portal object */
+ uint16_t token;
+ uint32_t dprc_id; /* HW ID for DPRC object */
+};
+
struct dpaa2_dpci_dev {
TAILQ_ENTRY(dpaa2_dpci_dev) next;
/**< Pointer to Next device instance */
diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h
index 729f360646..12b586b13b 100644
--- a/drivers/bus/fslmc/rte_fslmc.h
+++ b/drivers/bus/fslmc/rte_fslmc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2016,2019 NXP
+ * Copyright 2016,2021 NXP
*
*/
@@ -37,6 +37,9 @@ extern "C" {
#include <fslmc_vfio.h>
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
#define FSLMC_OBJECT_MAX_LEN 32 /**< Length of each device on bus */
#define DPAA2_INVALID_MBUF_SEQN 0
@@ -88,6 +91,8 @@ enum rte_dpaa2_dev_type {
DPAA2_QDMA, /**< DPDMAI type device */
DPAA2_MUX, /**< DPDMUX type device */
DPAA2_DPRTC, /**< DPRTC type device */
+ DPAA2_DPRC, /**< DPRC type device */
+ DPAA2_MAC, /**< DPMAC type device */
/* Unknown device placeholder */
DPAA2_UNKNOWN,
DPAA2_DEVTYPE_MAX,
@@ -122,6 +127,9 @@ struct rte_dpaa2_device {
};
enum rte_dpaa2_dev_type dev_type; /**< Device Type */
uint16_t object_id; /**< DPAA2 Object ID */
+ enum rte_dpaa2_dev_type ep_dev_type; /**< Endpoint Device Type */
+ uint16_t ep_object_id; /**< Endpoint DPAA2 Object ID */
+ char ep_name[RTE_DEV_NAME_MAX_LEN];
struct rte_intr_handle *intr_handle; /**< Interrupt handle */
struct rte_dpaa2_driver *driver; /**< Associated driver */
char name[FSLMC_OBJECT_MAX_LEN]; /**< DPAA2 Object name*/
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 10/17] net/dpaa2: support recycle loopback port
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (8 preceding siblings ...)
2021-12-06 12:18 ` [PATCH 09/17] bus/fslmc: add and scan dprc devices nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 11/17] net/dpaa: check status before configuring shared MAC nipun.gupta
` (8 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Jun Yang
From: Jun Yang <jun.yang@nxp.com>
DPAA2 recycle port is used for configuring the device
in the loopback mode. Loopback configuration can be at
dpni level or at serdes level.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 3 +-
drivers/net/dpaa2/dpaa2_ethdev.c | 32 +-
drivers/net/dpaa2/dpaa2_ethdev.h | 23 +
drivers/net/dpaa2/dpaa2_recycle.c | 780 ++++++++++++++++++++++++
drivers/net/dpaa2/mc/dpni.c | 32 +
drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 1 +
drivers/net/dpaa2/meson.build | 1 +
7 files changed, 870 insertions(+), 2 deletions(-)
create mode 100644 drivers/net/dpaa2/dpaa2_recycle.c
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 8cb4d404aa..4d0f7e4b5d 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2021 NXP
*
*/
@@ -176,6 +176,7 @@ struct dpaa2_queue {
uint16_t nb_desc;
uint16_t resv;
uint64_t offloads;
+ uint64_t lpbk_cntx;
} __rte_cache_aligned;
struct swp_active_dqs {
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index a45beed75f..d81f8cb07a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -668,6 +668,30 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
+ if (eth_conf->lpbk_mode) {
+ ret = dpaa2_dev_recycle_config(dev);
+ if (ret) {
+ DPAA2_PMD_ERR("Error to configure %s to recycle port.",
+ dev->data->name);
+
+ return ret;
+ }
+ } else {
+ /** User may disable loopback mode by calling
+ * "dev_configure" with lpbk_mode cleared.
+ * No matter the port was configured recycle or not,
+ * recycle de-configure is called here.
+ * If port is not recycled, the de-configure will return directly.
+ */
+ ret = dpaa2_dev_recycle_deconfig(dev);
+ if (ret) {
+ DPAA2_PMD_ERR("Error to de-configure recycle port %s.",
+ dev->data->name);
+
+ return ret;
+ }
+ }
+
dpaa2_tm_init(dev);
return 0;
@@ -2601,6 +2625,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
return -1;
}
+ if (eth_dev->data->dev_conf.lpbk_mode)
+ dpaa2_dev_recycle_deconfig(eth_dev);
+
/* Clean the device first */
ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
if (ret) {
@@ -2624,6 +2651,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
priv->dist_queues = attr.num_queues;
priv->num_channels = attr.num_channels;
priv->channel_inuse = 0;
+ rte_spinlock_init(&priv->lpbk_qp_lock);
/* only if the custom CG is enabled */
if (attr.options & DPNI_OPT_CUSTOM_CG)
@@ -2808,7 +2836,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
return ret;
}
}
- RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
+ RTE_LOG(INFO, PMD, "%s: netdev created, connected to %s\n",
+ eth_dev->data->name, dpaa2_dev->ep_name);
+
return 0;
init_err:
dpaa2_dev_close(eth_dev);
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index bd33a22a8e..b032da9eff 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -11,6 +11,7 @@
#include <rte_event_eth_rx_adapter.h>
#include <rte_pmd_dpaa2.h>
+#include <rte_fslmc.h>
#include <dpaa2_hw_pvt.h>
#include "dpaa2_tm.h"
@@ -65,6 +66,18 @@
/* Tx confirmation enabled */
#define DPAA2_TX_CONF_ENABLE 0x06
+/* HW loopback the egress traffic to self ingress*/
+#define DPAA2_TX_MAC_LOOPBACK_MODE 0x20
+
+#define DPAA2_TX_SERDES_LOOPBACK_MODE 0x40
+
+#define DPAA2_TX_DPNI_LOOPBACK_MODE 0x80
+
+#define DPAA2_TX_LOOPBACK_MODE \
+ (DPAA2_TX_MAC_LOOPBACK_MODE | \
+ DPAA2_TX_SERDES_LOOPBACK_MODE | \
+ DPAA2_TX_DPNI_LOOPBACK_MODE)
+
#define DPAA2_RSS_OFFLOAD_ALL ( \
RTE_ETH_RSS_L2_PAYLOAD | \
RTE_ETH_RSS_IP | \
@@ -192,6 +205,7 @@ struct dpaa2_dev_priv {
struct dpaa2_queue *next_tx_conf_queue;
struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
+ rte_spinlock_t lpbk_qp_lock;
uint8_t channel_inuse;
LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
@@ -268,4 +282,13 @@ int dpaa2_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
uint32_t flags __rte_unused);
int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
struct timespec *timestamp);
+
+int dpaa2_dev_recycle_config(struct rte_eth_dev *eth_dev);
+int dpaa2_dev_recycle_deconfig(struct rte_eth_dev *eth_dev);
+int dpaa2_dev_recycle_qp_setup(struct rte_dpaa2_device *dpaa2_dev,
+ uint16_t qidx, uint64_t cntx,
+ eth_rx_burst_t tx_lpbk, eth_tx_burst_t rx_lpbk,
+ struct dpaa2_queue **txq,
+ struct dpaa2_queue **rxq);
+
#endif /* _DPAA2_ETHDEV_H */
diff --git a/drivers/net/dpaa2/dpaa2_recycle.c b/drivers/net/dpaa2/dpaa2_recycle.c
new file mode 100644
index 0000000000..e274d24ead
--- /dev/null
+++ b/drivers/net/dpaa2/dpaa2_recycle.c
@@ -0,0 +1,780 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2019-2021 NXP
+ *
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_fslmc.h>
+#include <rte_flow_driver.h>
+
+#include "dpaa2_pmd_logs.h"
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_mempool.h>
+#include <dpaa2_hw_dpio.h>
+#include <mc/fsl_dpmng.h>
+#include "dpaa2_ethdev.h"
+#include "dpaa2_sparser.h"
+#include <fsl_qbman_debug.h>
+
+#include <rte_io.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#define LSX_SERDES_LAN_NB 8
+#define LSX_SERDES_REG_BASE 0x1ea0000
+#define LSX_LB_EN_BIT 0x10000000
+
+#define CONFIG_SYS_IMMR 0x01000000
+
+#define CONFIG_SYS_FSL_GUTS_ADDR (CONFIG_SYS_IMMR + 0x00E00000)
+#define CONFIG_SYS_FSL_SERDES_ADDR (CONFIG_SYS_IMMR + 0xEA0000)
+
+#define FSL_LX_SRDS1_PRTCL_SHIFT 16
+#define FSL_LX_SRDS2_PRTCL_SHIFT 21
+#define FSL_LX_SRDS3_PRTCL_SHIFT 26
+
+#define FSL_LS_SRDS1_PRTCL_SHIFT 16
+#define FSL_LS_SRDS2_PRTCL_SHIFT 0
+
+#define FSL_LX_SRDS1_REGSR 29
+#define FSL_LX_SRDS2_REGSR 29
+#define FSL_LX_SRDS3_REGSR 29
+
+#define FSL_LS_SRDS1_REGSR 29
+#define FSL_LS_SRDS2_REGSR 30
+
+#define FSL_LX_SRDS1_PRTCL_MASK 0x001F0000
+#define FSL_LX_SRDS2_PRTCL_MASK 0x03E00000
+#define FSL_LX_SRDS3_PRTCL_MASK 0x7C000000
+
+#define FSL_LS_SRDS1_PRTCL_MASK 0xFFFF0000
+#define FSL_LS_SRDS2_PRTCL_MASK 0x0000FFFF
+
+struct ccsr_lx_serdes_lan {
+ uint8_t unused1[0xa0];
+ uint32_t lnatcsr0;
+ uint8_t unused2[0x100 - 0xa4];
+} __rte_packed;
+
+struct ccsr_lx_serdes {
+ uint8_t unused0[0x800];
+ struct ccsr_lx_serdes_lan lane[LSX_SERDES_LAN_NB];
+} __rte_packed;
+
+struct ccsr_ls_serdes {
+ uint8_t unused[0x800];
+ struct serdes_lane {
+ uint32_t gcr0; /* General Control Register 0 */
+ uint32_t gcr1; /* General Control Register 1 */
+ uint32_t gcr2; /* General Control Register 2 */
+ uint32_t ssc0; /* Speed Switch Control 0 */
+ uint32_t rec0; /* Receive Equalization Control 0 */
+ uint32_t rec1; /* Receive Equalization Control 1 */
+ uint32_t tec0; /* Transmit Equalization Control 0 */
+ uint32_t ssc1; /* Speed Switch Control 1 */
+ uint32_t ttlc;
+ uint32_t rev[6];
+ uint32_t tsc3;
+ } lane[LSX_SERDES_LAN_NB];
+ uint8_t res5[0x19fc - 0xa00];
+} __rte_packed;
+
+struct ccsr_gur {
+ uint32_t porsr1; /* POR status 1 */
+ uint32_t porsr2; /* POR status 2 */
+ uint8_t res_008[0x20 - 0x8];
+ uint32_t gpporcr1; /* General-purpose POR configuration */
+ uint32_t gpporcr2; /* General-purpose POR configuration 2 */
+ uint32_t gpporcr3;
+ uint32_t gpporcr4;
+ uint8_t res_030[0x60 - 0x30];
+ uint32_t dcfg_fusesr; /* Fuse status register */
+ uint8_t res_064[0x70 - 0x64];
+ uint32_t devdisr; /* Device disable control 1 */
+ uint32_t devdisr2; /* Device disable control 2 */
+ uint32_t devdisr3; /* Device disable control 3 */
+ uint32_t devdisr4; /* Device disable control 4 */
+ uint32_t devdisr5; /* Device disable control 5 */
+ uint32_t devdisr6; /* Device disable control 6 */
+ uint8_t res_088[0x94 - 0x88];
+ uint32_t coredisr; /* Device disable control 7 */
+ uint8_t res_098[0xa0 - 0x98];
+ uint32_t pvr; /* Processor version */
+ uint32_t svr; /* System version */
+ uint8_t res_0a8[0x100 - 0xa8];
+ uint32_t rcwsr[30]; /* Reset control word status */
+
+ uint8_t res_178[0x200 - 0x178];
+ uint32_t scratchrw[16]; /* Scratch Read/Write */
+ uint8_t res_240[0x300 - 0x240];
+ uint32_t scratchw1r[4]; /* Scratch Read (Write once) */
+ uint8_t res_310[0x400 - 0x310];
+ uint32_t bootlocptrl; /* Boot location pointer low-order addr */
+ uint32_t bootlocptrh; /* Boot location pointer high-order addr */
+ uint8_t res_408[0x520 - 0x408];
+ uint32_t usb1_amqr;
+ uint32_t usb2_amqr;
+ uint8_t res_528[0x530 - 0x528]; /* add more registers when needed */
+ uint32_t sdmm1_amqr;
+ uint32_t sdmm2_amqr;
+ uint8_t res_538[0x550 - 0x538]; /* add more registers when needed */
+ uint32_t sata1_amqr;
+ uint32_t sata2_amqr;
+ uint32_t sata3_amqr;
+ uint32_t sata4_amqr;
+ uint8_t res_560[0x570 - 0x560]; /* add more registers when needed */
+ uint32_t misc1_amqr;
+ uint8_t res_574[0x590 - 0x574]; /* add more registers when needed */
+ uint32_t spare1_amqr;
+ uint32_t spare2_amqr;
+ uint32_t spare3_amqr;
+ uint8_t res_59c[0x620 - 0x59c]; /* add more registers when needed */
+ uint32_t gencr[7]; /* General Control Registers */
+ uint8_t res_63c[0x640 - 0x63c]; /* add more registers when needed */
+ uint32_t cgensr1; /* Core General Status Register */
+ uint8_t res_644[0x660 - 0x644]; /* add more registers when needed */
+ uint32_t cgencr1; /* Core General Control Register */
+ uint8_t res_664[0x740 - 0x664]; /* add more registers when needed */
+ uint32_t tp_ityp[64]; /* Topology Initiator Type Register */
+ struct {
+ uint32_t upper;
+ uint32_t lower;
+ } tp_cluster[4]; /* Core cluster n Topology Register */
+ uint8_t res_864[0x920 - 0x864]; /* add more registers when needed */
+ uint32_t ioqoscr[8]; /*I/O Quality of Services Register */
+ uint32_t uccr;
+ uint8_t res_944[0x960 - 0x944]; /* add more registers when needed */
+ uint32_t ftmcr;
+ uint8_t res_964[0x990 - 0x964]; /* add more registers when needed */
+ uint32_t coredisablesr;
+ uint8_t res_994[0xa00 - 0x994]; /* add more registers when needed */
+ uint32_t sdbgcr; /* Secure Debug Configuration Register */
+ uint8_t res_a04[0xbf8 - 0xa04]; /* add more registers when needed */
+ uint32_t ipbrr1;
+ uint32_t ipbrr2;
+ uint8_t res_858[0x1000 - 0xc00];
+} __rte_packed;
+
+static void *lsx_ccsr_map_region(uint64_t addr, size_t len)
+{
+ int fd;
+ void *tmp;
+ uint64_t start;
+ uint64_t offset;
+
+ fd = open("/dev/mem", O_RDWR);
+ if (fd < 0) {
+ DPAA2_PMD_ERR("Fail to open /dev/mem");
+ return NULL;
+ }
+
+ start = addr & PAGE_MASK;
+ offset = addr - start;
+ len = len & PAGE_MASK;
+ if (len < (size_t)PAGE_SIZE)
+ len = PAGE_SIZE;
+
+ tmp = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, start);
+
+ close(fd);
+
+ if (tmp != MAP_FAILED)
+ return (uint8_t *)tmp + offset;
+ else
+ return NULL;
+}
+
+static const uint8_t ls_sd1_prot_idx_map[] = {
+ 0x03, 0x05, 0x07, 0x09, 0x0a, 0x0c, 0x0e,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c,
+ 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a,
+ 0x2b, 0x2d, 0x2e, 0x30, 0x32, 0x33, 0x35,
+ 0x37, 0x39, 0x3b, 0x4b, 0x4c, 0x4d, 0x58
+};
+
+static const uint8_t ls_sd2_prot_idx_map[] = {
+ 0x07, 0x09, 0x0a, 0x0c, 0x0e, 0x10, 0x12,
+ 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20,
+ 0x22, 0x24, 0x3d, 0x3f, 0x41, 0x43, 0x45,
+ 0x47, 0x49, 0x4f, 0x50, 0x51, 0x52, 0x53,
+ 0x54, 0x55, 0x56, 0x57
+};
+
+static const uint8_t ls_sd1_eth_loopback_support[][LSX_SERDES_LAN_NB] = {
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x03*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x05*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x07*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x09*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0a*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0c*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0e*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x10*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x12*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x14*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x16*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x18*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1a*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1c*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1e*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x20*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x22*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x24*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x26*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x28*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x2a*/
+
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 0x2b*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 0x2d*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 0x2e*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 0x30*/
+
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x32*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x33*/
+
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x35*/
+ {1, 1, 0, 0, 0, 0, 0, 0}, /* 0x37*/
+
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 0x39*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 0x3b*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x4b*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 0x4c*/
+ {0, 0, 1, 1, 0, 0, 1, 1}, /* 0x4d*/
+ {0, 0, 0, 0, 0, 0, 1, 1} /* 0x58*/
+};
+
+static const uint8_t ls_sd2_eth_loopback_support[][LSX_SERDES_LAN_NB] = {
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x07*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x09*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0a*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0c*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0e*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x10*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x12*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x14*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x16*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x18*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1a*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1c*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1e*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x20*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x22*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x24*/
+
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x3d*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x3f*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x41*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x43*/
+
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x45*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 0x47*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x49*/
+
+ {0, 0, 1, 1, 0, 0, 1, 1}, /* 0x4f*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x50*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x51*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x52*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 0x53*/
+ {0, 0, 1, 1, 0, 0, 1, 1}, /* 0x54*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 0x55*/
+ {0, 0, 1, 1, 0, 0, 1, 1}, /* 0x56*/
+ {0, 0, 0, 0, 0, 0, 1, 1} /* 0x57*/
+};
+
+enum lsx_serdes_id {
+ LSX_SERDES_1 = 1,
+ LSX_SERDES_2 = 2
+};
+
+static const uint8_t lx_sd1_loopback_support[][LSX_SERDES_LAN_NB] = {
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 1 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 2 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 3 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 4 prot*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 5 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 6 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 7 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 8 prot*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 9 prot*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 10 prot*/
+ {0, 0, 1, 1, 0, 0, 1, 1}, /* 11 prot*/
+ {0, 0, 0, 0, 0, 0, 1, 1}, /* 12 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 13 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 14 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 15 prot*/
+ {0, 0, 1, 1, 0, 0, 0, 0}, /* 16 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 17 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 18 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 19 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 20 prot*/
+ {1, 1, 1, 1, 0, 0, 1, 1}, /* 21 prot*/
+ {1, 1, 1, 1, 0, 0, 1, 1} /* 22 prot*/
+};
+
+static const uint8_t lx_sd2_loopback_support[][LSX_SERDES_LAN_NB] = {
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 1 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 2 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 3 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 4 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 5 prot*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 6 prot*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 7 prot*/
+ {0, 0, 0, 0, 0, 0, 1, 1}, /* 8 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 9 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 10 prot*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 11 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 12 prot*/
+ {0, 0, 0, 0, 0, 0, 1, 1}, /* 13 prot*/
+ {0, 0, 1, 1, 0, 0, 1, 1} /* 14 prot*/
+};
+
+static inline int
+ls_mac_to_serdes_id(uint8_t mac_id)
+{
+ if (mac_id >= 1 && mac_id <= 8)
+ return LSX_SERDES_1;
+ if (mac_id >= 9 && mac_id <= 16)
+ return LSX_SERDES_2;
+
+ return -1;
+}
+
+static inline int
+lx_mac_to_serdes_id(uint8_t mac_id)
+{
+ if (mac_id >= 1 && mac_id <= 10)
+ return LSX_SERDES_1;
+ if (mac_id >= 11 && mac_id <= 18)
+ return LSX_SERDES_2;
+
+ return -1;
+}
+
+static inline int
+ls_serdes_cfg_to_idx(uint8_t sd_cfg, int sd_id)
+{
+ int i;
+
+ if (sd_id == LSX_SERDES_1) {
+ for (i = 0; i < (int)sizeof(ls_sd1_prot_idx_map); i++) {
+ if (ls_sd1_prot_idx_map[i] == sd_cfg)
+ return i;
+ }
+ } else if (sd_id == LSX_SERDES_2) {
+ for (i = 0; i < (int)sizeof(ls_sd2_prot_idx_map); i++) {
+ if (ls_sd2_prot_idx_map[i] == sd_cfg)
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static inline int
+lx_serdes_cfg_to_idx(uint8_t sd_cfg, int sd_id __rte_unused)
+{
+ return sd_cfg;
+}
+
+static inline int
+ls_mac_serdes_lpbk_support(uint16_t mac_id,
+ uint16_t *serdes_id, uint16_t *lan_id)
+{
+ struct ccsr_gur *gur_base =
+ lsx_ccsr_map_region(CONFIG_SYS_FSL_GUTS_ADDR,
+ sizeof(struct ccsr_gur) / 64 * 64 + 64);
+ uint32_t sd_cfg;
+ int sd_id, sd_idx;
+ uint16_t lan_id_tmp = 0;
+ const uint8_t *ls_sd_loopback_support;
+
+ sd_id = ls_mac_to_serdes_id(mac_id);
+
+ if (sd_id == LSX_SERDES_1) {
+ sd_cfg = rte_read32(&gur_base->rcwsr[FSL_LS_SRDS1_REGSR - 1]) &
+ FSL_LS_SRDS1_PRTCL_MASK;
+ sd_cfg >>= FSL_LS_SRDS1_PRTCL_SHIFT;
+ } else if (sd_id == LSX_SERDES_2) {
+ sd_cfg = rte_read32(&gur_base->rcwsr[FSL_LS_SRDS2_REGSR - 1]) &
+ FSL_LS_SRDS2_PRTCL_MASK;
+ sd_cfg >>= FSL_LS_SRDS2_PRTCL_SHIFT;
+ } else {
+ return false;
+ }
+ sd_cfg = sd_cfg & 0xff;
+
+ sd_idx = ls_serdes_cfg_to_idx(sd_cfg, sd_id);
+ if (sd_idx < 0) {
+ DPAA2_PMD_ERR("Serdes protocol(0x%02x) does not exist\n",
+ sd_cfg);
+ return false;
+ }
+
+ if (sd_id == LSX_SERDES_1) {
+ ls_sd_loopback_support =
+ &ls_sd1_eth_loopback_support[sd_idx][0];
+ } else {
+ ls_sd_loopback_support =
+ &ls_sd2_eth_loopback_support[sd_idx][0];
+ }
+
+ if (sd_id == LSX_SERDES_1)
+ lan_id_tmp = (mac_id - 1);
+ else
+ lan_id_tmp = (mac_id - 9);
+
+ if (lan_id_tmp >= LSX_SERDES_LAN_NB) {
+ DPAA2_PMD_ERR("Invalid serdes lan(%d).", lan_id_tmp);
+ return false;
+ }
+
+ if (!ls_sd_loopback_support[lan_id_tmp])
+ return false;
+
+ if (lan_id)
+ *lan_id = lan_id_tmp;
+ if (serdes_id)
+ *serdes_id = sd_id;
+
+ return true;
+}
+
+static inline int
+lx_mac_serdes_lpbk_support(uint16_t mac_id,
+ uint16_t *serdes_id, uint16_t *lan_id)
+{
+ struct ccsr_gur *gur_base =
+ lsx_ccsr_map_region(CONFIG_SYS_FSL_GUTS_ADDR,
+ sizeof(struct ccsr_gur) / 64 * 64 + 64);
+ uint32_t sd_cfg;
+ int sd_id, sd_idx;
+ uint16_t lan_id_tmp = 0;
+ const uint8_t *lx_sd_loopback_support;
+
+ sd_id = lx_mac_to_serdes_id(mac_id);
+
+ if (sd_id == LSX_SERDES_1) {
+ sd_cfg = rte_read32(&gur_base->rcwsr[FSL_LX_SRDS1_REGSR - 1]) &
+ FSL_LX_SRDS1_PRTCL_MASK;
+ sd_cfg >>= FSL_LX_SRDS1_PRTCL_SHIFT;
+ } else if (sd_id == LSX_SERDES_2) {
+ sd_cfg = rte_read32(&gur_base->rcwsr[FSL_LX_SRDS2_REGSR - 1]) &
+ FSL_LX_SRDS2_PRTCL_MASK;
+ sd_cfg >>= FSL_LX_SRDS2_PRTCL_SHIFT;
+ } else {
+ return false;
+ }
+ sd_cfg = sd_cfg & 0xff;
+
+ sd_idx = lx_serdes_cfg_to_idx(sd_cfg, sd_id);
+ if (sd_idx < 0)
+ return false;
+
+ if (sd_id == LSX_SERDES_1)
+ lx_sd_loopback_support = &lx_sd1_loopback_support[sd_idx][0];
+ else
+ lx_sd_loopback_support = &lx_sd2_loopback_support[sd_idx][0];
+
+ if (sd_id == LSX_SERDES_1) {
+ if (mac_id == 1)
+ lan_id_tmp = 0;
+ else if (mac_id == 2)
+ lan_id_tmp = 4;
+ else
+ lan_id_tmp = (mac_id - 3);
+ } else {
+ if (mac_id == 11)
+ lan_id_tmp = 0;
+ else if (mac_id == 12)
+ lan_id_tmp = 1;
+ else if (mac_id == 13)
+ lan_id_tmp = 6;
+ else if (mac_id == 14)
+ lan_id_tmp = 7;
+ else if (mac_id == 15)
+ lan_id_tmp = 4;
+ else if (mac_id == 16)
+ lan_id_tmp = 5;
+ else if (mac_id == 17)
+ lan_id_tmp = 2;
+ else if (mac_id == 18)
+ lan_id_tmp = 3;
+ else
+ return false;
+ }
+
+ if (lan_id_tmp >= LSX_SERDES_LAN_NB)
+ return false;
+
+ if (!lx_sd_loopback_support[lan_id_tmp])
+ return false;
+
+ if (lan_id)
+ *lan_id = lan_id_tmp;
+ if (serdes_id)
+ *serdes_id = sd_id;
+
+ return true;
+}
+
+static inline int
+ls_serdes_eth_lpbk(uint16_t mac_id, int en)
+{
+ uint16_t serdes_id, lan_id;
+ int ret;
+ uint32_t data;
+ struct ccsr_ls_serdes *serdes_base;
+ void *reg = 0;
+
+ ret = ls_mac_serdes_lpbk_support(mac_id, &serdes_id, &lan_id);
+ if (!ret)
+ return -ENOTSUP;
+
+ serdes_base = lsx_ccsr_map_region(CONFIG_SYS_FSL_SERDES_ADDR +
+ (serdes_id - LSX_SERDES_1) * 0x10000,
+ sizeof(struct ccsr_ls_serdes) / 64 * 64 + 64);
+ if (!serdes_base) {
+ DPAA2_PMD_ERR("Serdes register map failed\n");
+ return -ENOMEM;
+ }
+
+ if (serdes_id == LSX_SERDES_1)
+ lan_id = LSX_SERDES_LAN_NB - lan_id - 1;
+
+ reg = &serdes_base->lane[lan_id].tsc3;
+
+ data = rte_read32(reg);
+ if (en)
+ rte_write32(data | LSX_LB_EN_BIT, reg);
+ else
+ rte_write32(data & (~LSX_LB_EN_BIT), reg);
+
+ return 0;
+}
+
+static inline int
+lx_serdes_eth_lpbk(uint16_t mac_id, int en)
+{
+ uint16_t serdes_id = 0xffff, lan_id = 0xffff;
+ int ret;
+ uint32_t data;
+ struct ccsr_lx_serdes *serdes_base;
+ void *reg = 0;
+
+ ret = lx_mac_serdes_lpbk_support(mac_id, &serdes_id, &lan_id);
+ if (!ret)
+ return -ENOTSUP;
+
+ serdes_base = lsx_ccsr_map_region(CONFIG_SYS_FSL_SERDES_ADDR +
+ (serdes_id - LSX_SERDES_1) * 0x10000,
+ sizeof(struct ccsr_lx_serdes) / 64 * 64 + 64);
+ if (!serdes_base) {
+ DPAA2_PMD_ERR("Serdes register map failed\n");
+ return -ENOMEM;
+ }
+
+ if (serdes_id == LSX_SERDES_1)
+ lan_id = LSX_SERDES_LAN_NB - lan_id - 1;
+
+ reg = &serdes_base->lane[lan_id].lnatcsr0;
+
+ data = rte_read32(reg);
+ if (en)
+ rte_write32(data | LSX_LB_EN_BIT, reg);
+ else
+ rte_write32(data & (~LSX_LB_EN_BIT), reg);
+
+ return 0;
+}
+
+/* Configure dpaa2 port as recycle port */
+int
+dpaa2_dev_recycle_config(struct rte_eth_dev *eth_dev)
+{
+ struct rte_device *dev = eth_dev->device;
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct rte_dpaa2_device *dpaa2_dev =
+ container_of(dev, struct rte_dpaa2_device, device);
+ struct fsl_mc_io *dpni_dev = eth_dev->process_private;
+ struct dpni_port_cfg port_cfg;
+ int ret;
+
+ if (priv->flags & DPAA2_TX_LOOPBACK_MODE) {
+ DPAA2_PMD_INFO("%s has been configured recycle device.",
+ eth_dev->data->name);
+
+ return 0;
+ }
+
+ if (dpaa2_dev->ep_dev_type == DPAA2_MAC) {
+ /** For dpmac-dpni connection,
+ * try setting serdes loopback as recycle device at first.
+ */
+ if (dpaa2_svr_family == SVR_LS2088A) {
+ ret = ls_serdes_eth_lpbk(dpaa2_dev->ep_object_id, 1);
+ if (!ret) {
+ priv->flags |= DPAA2_TX_SERDES_LOOPBACK_MODE;
+ return 0;
+ }
+ } else if (dpaa2_svr_family == SVR_LX2160A) {
+ ret = lx_serdes_eth_lpbk(dpaa2_dev->ep_object_id, 1);
+ if (!ret) {
+ priv->flags |= DPAA2_TX_SERDES_LOOPBACK_MODE;
+ return 0;
+ }
+ } else {
+ DPAA2_PMD_DEBUG("Serdes loopback not support SoC(0x%08x)",
+ dpaa2_svr_family);
+ }
+
+ /** If serdes loopback is not supported for this mac,
+ * trying set mac loopback.
+ */
+
+ port_cfg.loopback_en = 1;
+ ret = dpni_set_port_cfg(dpni_dev, CMD_PRI_LOW,
+ priv->token,
+ DPNI_PORT_CFG_LOOPBACK,
+ &port_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error(%d) to enable loopback", ret);
+ return -ENOTSUP;
+ }
+
+ priv->flags |= DPAA2_TX_MAC_LOOPBACK_MODE;
+
+ return 0;
+ }
+
+ if (dpaa2_dev->ep_dev_type == DPAA2_ETH &&
+ dpaa2_dev->object_id == dpaa2_dev->ep_object_id) {
+ priv->flags |= DPAA2_TX_DPNI_LOOPBACK_MODE;
+
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+int
+dpaa2_dev_recycle_deconfig(struct rte_eth_dev *eth_dev)
+{
+ struct rte_device *dev = eth_dev->device;
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct rte_dpaa2_device *dpaa2_dev =
+ container_of(dev, struct rte_dpaa2_device, device);
+ struct fsl_mc_io *dpni_dev = eth_dev->process_private;
+ struct dpni_port_cfg port_cfg;
+ int ret = 0;
+
+ if (!(priv->flags & DPAA2_TX_LOOPBACK_MODE))
+ return 0;
+
+ if (priv->flags & DPAA2_TX_SERDES_LOOPBACK_MODE) {
+ if (dpaa2_svr_family == SVR_LS2088A) {
+ ret = ls_serdes_eth_lpbk(dpaa2_dev->ep_object_id, 0);
+ if (ret) {
+ DPAA2_PMD_WARN("Error(%d) to disable Serdes loopback",
+ ret);
+ } else {
+ priv->flags &= ~DPAA2_TX_SERDES_LOOPBACK_MODE;
+ }
+ } else if (dpaa2_svr_family == SVR_LX2160A) {
+ ret = lx_serdes_eth_lpbk(dpaa2_dev->ep_object_id, 0);
+ if (ret) {
+ DPAA2_PMD_WARN("Error(%d) to disable Serdes loopback",
+ ret);
+ } else {
+ priv->flags &= ~DPAA2_TX_SERDES_LOOPBACK_MODE;
+ }
+ } else {
+ DPAA2_PMD_DEBUG("Serdes loopback not support SoC(0x%08x)",
+ dpaa2_svr_family);
+ }
+ }
+
+ if (priv->flags & DPAA2_TX_MAC_LOOPBACK_MODE) {
+ port_cfg.loopback_en = 0;
+ ret = dpni_set_port_cfg(dpni_dev, CMD_PRI_LOW,
+ priv->token,
+ DPNI_PORT_CFG_LOOPBACK,
+ &port_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error(%d) to disable TX mac loopback",
+ ret);
+ } else {
+ priv->flags &= ~DPAA2_TX_MAC_LOOPBACK_MODE;
+ }
+ }
+
+ if (priv->flags & DPAA2_TX_DPNI_LOOPBACK_MODE)
+ priv->flags &= ~DPAA2_TX_DPNI_LOOPBACK_MODE;
+
+ return ret;
+}
+
+int
+dpaa2_dev_recycle_qp_setup(struct rte_dpaa2_device *dpaa2_dev,
+ uint16_t qidx, uint64_t cntx,
+ eth_rx_burst_t tx_lpbk, eth_tx_burst_t rx_lpbk,
+ struct dpaa2_queue **txq,
+ struct dpaa2_queue **rxq)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_data *data;
+ struct dpaa2_queue *txq_tmp;
+ struct dpaa2_queue *rxq_tmp;
+ struct dpaa2_dev_priv *priv;
+
+ dev = dpaa2_dev->eth_dev;
+ data = dev->data;
+ priv = data->dev_private;
+
+ if (!(priv->flags & DPAA2_TX_LOOPBACK_MODE) &&
+ (tx_lpbk || rx_lpbk)) {
+ DPAA2_PMD_ERR("%s is NOT recycle device!", data->name);
+
+ return -EINVAL;
+ }
+
+ if (qidx >= data->nb_rx_queues || qidx >= data->nb_tx_queues)
+ return -EINVAL;
+
+ rte_spinlock_lock(&priv->lpbk_qp_lock);
+
+ if (tx_lpbk)
+ dev->tx_pkt_burst = tx_lpbk;
+
+ if (rx_lpbk)
+ dev->rx_pkt_burst = rx_lpbk;
+
+ txq_tmp = data->tx_queues[qidx];
+ txq_tmp->lpbk_cntx = cntx;
+ rxq_tmp = data->rx_queues[qidx];
+ rxq_tmp->lpbk_cntx = cntx;
+
+ if (txq)
+ *txq = txq_tmp;
+ if (rxq)
+ *rxq = rxq_tmp;
+
+ rte_spinlock_unlock(&priv->lpbk_qp_lock);
+
+ return 0;
+}
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index b7a65cb637..7a2bc15eb4 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -3087,3 +3087,35 @@ int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
return err;
}
+/**
+ * dpni_set_port_cfg() - performs configurations at the physical port connected
+ * to this dpni. The command has effect only if the dpni is connected to
+ * another dpni object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @flags: Valid fields from port_cfg structure
+ * @port_cfg: Configuration data; one or more of DPNI_PORT_CFG_
+ * The command can be called only when dpni is connected to a dpmac object. If
+ * the dpni is unconnected or the endpoint is not a dpni it will return error.
+ * If dpmac endpoint is disconnected the settings will be lost
+ */
+int dpni_set_port_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, uint32_t flags, struct dpni_port_cfg *port_cfg)
+{
+ struct dpni_cmd_set_port_cfg *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PORT_CFG,
+ cmd_flags, token);
+
+ cmd_params = (struct dpni_cmd_set_port_cfg *)cmd.params;
+ cmd_params->flags = cpu_to_le32(flags);
+ dpni_set_field(cmd_params->bit_params, PORT_LOOPBACK_EN,
+ !!port_cfg->loopback_en);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index ed0bd7615a..b7bd7556af 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -119,6 +119,7 @@
#define DPNI_CMDID_REMOVE_CUSTOM_TPID DPNI_CMD(0x276)
#define DPNI_CMDID_GET_CUSTOM_TPID DPNI_CMD(0x277)
#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
+#define DPNI_CMDID_SET_PORT_CFG DPNI_CMD(0x27B)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
diff --git a/drivers/net/dpaa2/meson.build b/drivers/net/dpaa2/meson.build
index 21b827a259..51598c048c 100644
--- a/drivers/net/dpaa2/meson.build
+++ b/drivers/net/dpaa2/meson.build
@@ -14,6 +14,7 @@ sources = files(
'dpaa2_mux.c',
'dpaa2_ethdev.c',
'dpaa2_flow.c',
+ 'dpaa2_recycle.c',
'dpaa2_rxtx.c',
'dpaa2_sparser.c',
'dpaa2_ptp.c',
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 11/17] net/dpaa: check status before configuring shared MAC
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (9 preceding siblings ...)
2021-12-06 12:18 ` [PATCH 10/17] net/dpaa2: support recycle loopback port nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 12/17] net/dpaa: enable checksum for shared MAC interface nipun.gupta
` (7 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Nipun Gupta
From: Nipun Gupta <nipun.gupta@nxp.com>
For shared MAC interface, it is a prerequisite to enable the
interface in the kernel, before using it in user-space. This
patch makes sure that the device does not get configured in
case the shared MAC interface is not enabled in the kernel.
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/bus/dpaa/base/fman/fman_hw.c | 11 +++++++++++
drivers/bus/dpaa/include/fsl_fman.h | 2 ++
drivers/bus/dpaa/version.map | 1 +
drivers/net/dpaa/dpaa_ethdev.c | 13 ++++++++++++-
4 files changed, 26 insertions(+), 1 deletion(-)
diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index af9bac76c2..24a99f7235 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -314,6 +314,17 @@ fman_if_disable_rx(struct fman_if *p)
out_be32(__if->ccsr_map + 8, in_be32(__if->ccsr_map + 8) & ~(u32)2);
}
+int
+fman_if_get_rx_status(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* return true if RX bit is set */
+ return !!(in_be32(__if->ccsr_map + 8) & (u32)2);
+}
+
void
fman_if_loopback_enable(struct fman_if *p)
{
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
index f3a5d05970..acb344584f 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -81,6 +81,8 @@ __rte_internal
void fman_if_enable_rx(struct fman_if *p);
__rte_internal
void fman_if_disable_rx(struct fman_if *p);
+__rte_internal
+int fman_if_get_rx_status(struct fman_if *p);
/* Enable/disable loopback on specific interfaces */
__rte_internal
diff --git a/drivers/bus/dpaa/version.map b/drivers/bus/dpaa/version.map
index 900635b210..1a840fd1a5 100644
--- a/drivers/bus/dpaa/version.map
+++ b/drivers/bus/dpaa/version.map
@@ -33,6 +33,7 @@ INTERNAL {
fman_if_get_fdoff;
fman_if_get_maxfrm;
fman_if_get_sg_enable;
+ fman_if_get_rx_status;
fman_if_loopback_disable;
fman_if_loopback_enable;
fman_if_promiscuous_disable;
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index e49f765434..3972ecaed8 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -195,6 +195,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
uint64_t rx_offloads = eth_conf->rxmode.offloads;
uint64_t tx_offloads = eth_conf->txmode.offloads;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
struct rte_device *rdev = dev->device;
struct rte_eth_link *link = &dev->data->dev_link;
struct rte_dpaa_device *dpaa_dev;
@@ -203,7 +204,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
struct rte_intr_handle *intr_handle;
uint32_t max_rx_pktlen;
int speed, duplex;
- int ret;
+ int ret, rx_status;
PMD_INIT_FUNC_TRACE();
@@ -211,6 +212,16 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
intr_handle = dpaa_dev->intr_handle;
__fif = container_of(fif, struct __fman_if, __if);
+ /* Check if interface is enabled in case of shared MAC */
+ if (fif->is_shared_mac) {
+ rx_status = fman_if_get_rx_status(fif);
+ if (!rx_status) {
+ DPAA_PMD_ERR("%s Interface not enabled in kernel!",
+ dpaa_intf->name);
+ return -EHOSTDOWN;
+ }
+ }
+
/* Rx offloads which are enabled by default */
if (dev_rx_offloads_nodis & ~rx_offloads) {
DPAA_PMD_INFO(
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 12/17] net/dpaa: enable checksum for shared MAC interface
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (10 preceding siblings ...)
2021-12-06 12:18 ` [PATCH 11/17] net/dpaa: check status before configuring shared MAC nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 13/17] net/enetc: add support for VFs nipun.gupta
` (6 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Brick Yang, Nipun Gupta
From: Nipun Gupta <nipun.gupta@nxp.com>
In case of shared MAC, the B0V bit in contextA is required
to be set so that ASPID is 0.
Signed-off-by: Brick Yang <brick.yang@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/net/dpaa/dpaa_ethdev.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 3972ecaed8..7135a5998d 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -1755,6 +1755,10 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
/* no tx-confirmation */
opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
+ if (fman_ip_rev >= FMAN_V3) {
+ /* Set B0V bit in contextA to set ASPID to 0 */
+ opts.fqd.context_a.hi |= 0x04000000;
+ }
DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
if (cgr_tx) {
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 13/17] net/enetc: add support for VFs
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (11 preceding siblings ...)
2021-12-06 12:18 ` [PATCH 12/17] net/dpaa: enable checksum for shared MAC interface nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 14/17] net/pfe: disable HW CRC stripping nipun.gupta
` (5 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Gagandeep Singh
From: Gagandeep Singh <g.singh@nxp.com>
Add virtual function support for enetc devices
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/net/enetc/enetc_ethdev.c | 25 ++++++++++++++++++++-----
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/drivers/net/enetc/enetc_ethdev.c b/drivers/net/enetc/enetc_ethdev.c
index 7cdb8ce463..1b4337bc48 100644
--- a/drivers/net/enetc/enetc_ethdev.c
+++ b/drivers/net/enetc/enetc_ethdev.c
@@ -19,6 +19,9 @@ enetc_dev_start(struct rte_eth_dev *dev)
uint32_t val;
PMD_INIT_FUNC_TRACE();
+ if (hw->device_id == ENETC_DEV_ID_VF)
+ return 0;
+
val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
@@ -55,6 +58,9 @@ enetc_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
dev->data->dev_started = 0;
+ if (hw->device_id == ENETC_DEV_ID_VF)
+ return 0;
+
/* Disable port */
val = enetc_port_rd(enetc_hw, ENETC_PMR);
enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));
@@ -160,11 +166,20 @@ enetc_hardware_init(struct enetc_eth_hw *hw)
/* Enabling Station Interface */
enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);
- *mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
- high_mac = (uint32_t)*mac;
- mac++;
- *mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
- low_mac = (uint16_t)*mac;
+
+ if (hw->device_id == ENETC_DEV_ID_VF) {
+ *mac = (uint32_t)enetc_rd(enetc_hw, ENETC_SIPMAR0);
+ high_mac = (uint32_t)*mac;
+ mac++;
+ *mac = (uint32_t)enetc_rd(enetc_hw, ENETC_SIPMAR1);
+ low_mac = (uint16_t)*mac;
+ } else {
+ *mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
+ high_mac = (uint32_t)*mac;
+ mac++;
+ *mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
+ low_mac = (uint16_t)*mac;
+ }
if ((high_mac | low_mac) == 0) {
char *first_byte;
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 14/17] net/pfe: disable HW CRC stripping
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (12 preceding siblings ...)
2021-12-06 12:18 ` [PATCH 13/17] net/enetc: add support for VFs nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 15/17] net/pfe: reduce driver initialization time nipun.gupta
` (4 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Gagandeep Singh
From: Gagandeep Singh <g.singh@nxp.com>
LS1012A MAC PCS block has an erratum that is seen with specific
PHY AR803x. The issue is triggered by the (spec-compliant)
operation of the AR803x PHY on the LS1012A-FRWY board.
Due to this, a good-FCS packet is reported as an error packet by the
MAC, so for these error packets the FCS should be validated and only
real error packets discarded in the PFE engine Rx packet path. From now
onwards, CRC validation will be handled in pfe.ko, and the DPDK driver
cannot use the CRC forwarding option.
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/net/pfe/pfe_ethdev.c | 7 +++++--
drivers/net/pfe/pfe_hal.c | 4 ++--
2 files changed, 7 insertions(+), 4 deletions(-)
diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
index 047010e15e..bfcaf51dd9 100644
--- a/drivers/net/pfe/pfe_ethdev.c
+++ b/drivers/net/pfe/pfe_ethdev.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2020 NXP
*/
#include <sys/ioctl.h>
@@ -422,8 +422,11 @@ pfe_eth_close(struct rte_eth_dev *dev)
}
static int
-pfe_eth_configure(struct rte_eth_dev *dev __rte_unused)
+pfe_eth_configure(struct rte_eth_dev *dev)
{
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ PFE_PMD_ERR("PMD does not support KEEP_CRC offload");
+
return 0;
}
diff --git a/drivers/net/pfe/pfe_hal.c b/drivers/net/pfe/pfe_hal.c
index 41d783dbff..f49d1728b2 100644
--- a/drivers/net/pfe/pfe_hal.c
+++ b/drivers/net/pfe/pfe_hal.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2020 NXP
*/
#include <arpa/inet.h>
@@ -191,7 +191,7 @@ gemac_set_mode(void *base, __rte_unused int mode)
val &= ~EMAC_RCNTRL_LOOP;
/*Enable flow control and MII mode*/
- val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE | EMAC_RCNTRL_CRC_FWD);
+ val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE);
writel(val, base + EMAC_RCNTRL_REG);
}
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 15/17] net/pfe: reduce driver initialization time
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (13 preceding siblings ...)
2021-12-06 12:18 ` [PATCH 14/17] net/pfe: disable HW CRC stripping nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 16/17] net/pfe: remove setting unused value nipun.gupta
` (3 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Gagandeep Singh
From: Gagandeep Singh <g.singh@nxp.com>
This patch reduces the delay in the device init.
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/net/pfe/pfe_hif.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/net/pfe/pfe_hif.c b/drivers/net/pfe/pfe_hif.c
index c4a7154ba7..eade726b2e 100644
--- a/drivers/net/pfe/pfe_hif.c
+++ b/drivers/net/pfe/pfe_hif.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2020 NXP
*/
#include "pfe_logs.h"
@@ -9,6 +9,8 @@
#include <sys/eventfd.h>
#include <arpa/inet.h>
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define msleep(x) rte_delay_us(1000 * (x))
static int
pfe_hif_alloc_descr(struct pfe_hif *hif)
{
@@ -766,7 +768,7 @@ pfe_hif_rx_idle(struct pfe_hif *hif)
if (rx_status & BDP_CSR_RX_DMA_ACTV)
send_dummy_pkt_to_hif();
- sleep(1);
+ msleep(DIV_ROUND_UP(100, 1000));
} while (--hif_stop_loop);
if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 16/17] net/pfe: remove setting unused value
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (14 preceding siblings ...)
2021-12-06 12:18 ` [PATCH 15/17] net/pfe: reduce driver initialization time nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-06 12:18 ` [PATCH 17/17] net/pfe: fix for 32 bit and PPC compilation nipun.gupta
` (2 subsequent siblings)
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Apeksha Gupta
From: Apeksha Gupta <apeksha.gupta@nxp.com>
Remove the setting of the link status where its value is not being used.
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
---
drivers/net/pfe/pfe_ethdev.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
index bfcaf51dd9..5a3008cbb5 100644
--- a/drivers/net/pfe/pfe_ethdev.c
+++ b/drivers/net/pfe/pfe_ethdev.c
@@ -590,8 +590,7 @@ pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus);
if (ret != 0) {
PFE_PMD_ERR("Unable to fetch link status (ioctl)\n");
- /* use dummy link value */
- link.link_status = 1;
+ return -1;
}
PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.\n",
lstatus, priv->id);
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH 17/17] net/pfe: fix for 32 bit and PPC compilation
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (15 preceding siblings ...)
2021-12-06 12:18 ` [PATCH 16/17] net/pfe: remove setting unused value nipun.gupta
@ 2021-12-06 12:18 ` nipun.gupta
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
18 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-06 12:18 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stable, Sachin Saxena
From: Sachin Saxena <sachin.saxena@nxp.com>
This patch fixes compilation for 32-bit and PowerPC
compilers.
Fixes: 36220514de01 ("net/pfe: add Rx/Tx")
Cc: stable@dpdk.org
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
---
drivers/net/pfe/pfe_hif.c | 3 ++-
drivers/net/pfe/pfe_hif_lib.c | 3 ++-
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/net/pfe/pfe_hif.c b/drivers/net/pfe/pfe_hif.c
index eade726b2e..67efb8b3a7 100644
--- a/drivers/net/pfe/pfe_hif.c
+++ b/drivers/net/pfe/pfe_hif.c
@@ -309,7 +309,8 @@ client_put_rxpacket(struct hif_rx_queue *queue,
if (readl(&desc->ctrl) & CL_DESC_OWN) {
- mbuf = rte_cpu_to_le_64(rte_pktmbuf_alloc(pool));
+ mbuf = (struct rte_mbuf *)
+ rte_cpu_to_le_64((uintptr_t)rte_pktmbuf_alloc(pool));
if (unlikely(!mbuf)) {
PFE_PMD_WARN("Buffer allocation failure\n");
return NULL;
diff --git a/drivers/net/pfe/pfe_hif_lib.c b/drivers/net/pfe/pfe_hif_lib.c
index 799050dce3..2487273a7c 100644
--- a/drivers/net/pfe/pfe_hif_lib.c
+++ b/drivers/net/pfe/pfe_hif_lib.c
@@ -50,7 +50,8 @@ pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool)
hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
- mbuf = rte_cpu_to_le_64(rte_pktmbuf_alloc(mb_pool));
+ mbuf = (struct rte_mbuf *)
+ rte_cpu_to_le_64((uintptr_t)rte_pktmbuf_alloc(mb_pool));
if (mbuf)
hif_shm->rx_buf_pool[i] = mbuf;
else
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 00/16] features and fixes on NXP eth devices
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (16 preceding siblings ...)
2021-12-06 12:18 ` [PATCH 17/17] net/pfe: fix for 32 bit and PPC compilation nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 16:16 ` [PATCH v2 01/16] bus/fslmc: update MC to 10.29 nipun.gupta
` (16 more replies)
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
18 siblings, 17 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal
From: Nipun Gupta <nipun.gupta@nxp.com>
This series adds a few features and important fixes for DPAA,
PFE and ENETC devices.
Features added:
- level 2 support for shaping on DPAA2
- loopback configuration for DPNI devices on DPAA2
- Multiple TXQ's enqueue for ordered queues for performance
- VFs support on ENETC
Fixes:
- fix unregistering interrupt handler on DPAA2
- fix timestamping for IEEE1588 on DPAA1
Changes in v2:
- fix checkpatch errors
Apeksha Gupta (1):
net/pfe: remove setting unused value
Gagandeep Singh (4):
net/dpaa2: add support for level 2 in traffic management
net/enetc: add support for VFs
net/pfe: disable HW CRC stripping
net/pfe: reduce driver initialization time
Jun Yang (4):
net/dpaa2: support multiple txqs en-queue for ordered
net/dpaa2: secondary process handling for dpni
bus/fslmc: add and scan dprc devices
net/dpaa2: support recycle loopback port
Nipun Gupta (4):
bus/fslmc: update MC to 10.29
bus/fslmc: use dmb oshst for synchronization before I/O
net/dpaa: check status before configuring shared MAC
net/dpaa: enable checksum for shared MAC interface
Rohit Raj (1):
net/dpaa2: warn user in case of high nb desc
Vanshika Shukla (2):
net/dpaa2: fix unregistering interrupt handler
net/dpaa2: fix timestamping for IEEE1588
doc/guides/nics/dpaa2.rst | 2 +-
drivers/bus/dpaa/base/fman/fman_hw.c | 11 +
drivers/bus/dpaa/include/fsl_fman.h | 2 +
drivers/bus/dpaa/version.map | 1 +
drivers/bus/fslmc/fslmc_bus.c | 15 +-
drivers/bus/fslmc/fslmc_vfio.c | 18 +-
drivers/bus/fslmc/mc/dprc.c | 129 ++++
drivers/bus/fslmc/mc/fsl_dpmng.h | 2 +-
drivers/bus/fslmc/mc/fsl_dprc.h | 46 ++
drivers/bus/fslmc/mc/fsl_dprc_cmd.h | 48 ++
drivers/bus/fslmc/meson.build | 4 +-
drivers/bus/fslmc/portal/dpaa2_hw_dprc.c | 100 +++
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 15 +-
drivers/bus/fslmc/qbman/include/compat.h | 4 +-
drivers/bus/fslmc/rte_fslmc.h | 10 +-
drivers/event/dpaa2/dpaa2_eventdev.c | 12 +-
drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 23 +
drivers/mempool/dpaa2/rte_dpaa2_mempool.h | 15 +
drivers/mempool/dpaa2/version.map | 1 +
drivers/net/dpaa/dpaa_ethdev.c | 17 +-
drivers/net/dpaa2/base/dpaa2_hw_dpni.c | 5 +-
drivers/net/dpaa2/dpaa2_ethdev.c | 117 +++-
drivers/net/dpaa2/dpaa2_ethdev.h | 38 +-
drivers/net/dpaa2/dpaa2_ptp.c | 8 +-
drivers/net/dpaa2/dpaa2_recycle.c | 780 ++++++++++++++++++++++
drivers/net/dpaa2/dpaa2_rxtx.c | 181 ++++-
drivers/net/dpaa2/dpaa2_tm.c | 563 +++++++++++++---
drivers/net/dpaa2/dpaa2_tm.h | 17 +-
drivers/net/dpaa2/mc/dpdmux.c | 8 +
drivers/net/dpaa2/mc/dpkg.c | 7 +-
drivers/net/dpaa2/mc/dpni.c | 417 ++++++++----
drivers/net/dpaa2/mc/fsl_dpdmux.h | 3 +
drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h | 5 +-
drivers/net/dpaa2/mc/fsl_dpni.h | 173 +++--
drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 137 ++--
drivers/net/dpaa2/meson.build | 1 +
drivers/net/dpaa2/version.map | 1 +
drivers/net/enetc/enetc_ethdev.c | 25 +-
drivers/net/pfe/pfe_ethdev.c | 10 +-
drivers/net/pfe/pfe_hal.c | 4 +-
drivers/net/pfe/pfe_hif.c | 6 +-
41 files changed, 2528 insertions(+), 453 deletions(-)
create mode 100644 drivers/bus/fslmc/mc/dprc.c
create mode 100644 drivers/bus/fslmc/mc/fsl_dprc.h
create mode 100644 drivers/bus/fslmc/mc/fsl_dprc_cmd.h
create mode 100644 drivers/bus/fslmc/portal/dpaa2_hw_dprc.c
create mode 100644 drivers/net/dpaa2/dpaa2_recycle.c
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 01/16] bus/fslmc: update MC to 10.29
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 16:16 ` [PATCH v2 02/16] bus/fslmc: use dmb oshst for synchronization before I/O nipun.gupta
` (15 subsequent siblings)
16 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Nipun Gupta, Gagandeep Singh
From: Nipun Gupta <nipun.gupta@nxp.com>
update MC firmware library version to 10.29
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/bus/fslmc/mc/fsl_dpmng.h | 2 +-
drivers/net/dpaa2/mc/dpdmux.c | 8 ++
drivers/net/dpaa2/mc/dpkg.c | 7 +-
drivers/net/dpaa2/mc/dpni.c | 111 ++++++++++++++++++++------
drivers/net/dpaa2/mc/fsl_dpdmux.h | 3 +
drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h | 5 +-
drivers/net/dpaa2/mc/fsl_dpni.h | 54 ++++++++++---
drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 57 +++++++------
8 files changed, 181 insertions(+), 66 deletions(-)
diff --git a/drivers/bus/fslmc/mc/fsl_dpmng.h b/drivers/bus/fslmc/mc/fsl_dpmng.h
index 7e9bd96429..073d47efbf 100644
--- a/drivers/bus/fslmc/mc/fsl_dpmng.h
+++ b/drivers/bus/fslmc/mc/fsl_dpmng.h
@@ -20,7 +20,7 @@ struct fsl_mc_io;
* Management Complex firmware version information
*/
#define MC_VER_MAJOR 10
-#define MC_VER_MINOR 28
+#define MC_VER_MINOR 29
/**
* struct mc_version
diff --git a/drivers/net/dpaa2/mc/dpdmux.c b/drivers/net/dpaa2/mc/dpdmux.c
index edbb01b45b..1bb153cad7 100644
--- a/drivers/net/dpaa2/mc/dpdmux.c
+++ b/drivers/net/dpaa2/mc/dpdmux.c
@@ -398,6 +398,9 @@ int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
attr->mem_size = le16_to_cpu(rsp_params->mem_size);
attr->default_if = le16_to_cpu(rsp_params->default_if);
+ attr->max_dmat_entries = le16_to_cpu(rsp_params->max_dmat_entries);
+ attr->max_mc_groups = le16_to_cpu(rsp_params->max_mc_groups);
+ attr->max_vlan_ids = le16_to_cpu(rsp_params->max_vlan_ids);
return 0;
}
@@ -470,6 +473,11 @@ int dpdmux_if_disable(struct fsl_mc_io *mc_io,
* will be updated with the minimum value of the mfls of the connected
* dpnis and the actual value of dmux mfl.
*
+ * If dpdmux object is created using DPDMUX_OPT_AUTO_MAX_FRAME_LEN and maximum
+ * frame length is changed for a dpni connected to dpdmux interface the change
+ * is propagated through dpdmux interfaces and will overwrite the value set using
+ * this API.
+ *
* Return: '0' on Success; Error code otherwise.
*/
int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
diff --git a/drivers/net/dpaa2/mc/dpkg.c b/drivers/net/dpaa2/mc/dpkg.c
index 1e171eedc7..4789976b7d 100644
--- a/drivers/net/dpaa2/mc/dpkg.c
+++ b/drivers/net/dpaa2/mc/dpkg.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
- * Copyright 2017 NXP
+ * Copyright 2017-2021 NXP
*
*/
#include <fsl_mc_sys.h>
@@ -63,10 +63,7 @@ dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
dpkg_set_field(extr->extract_type, EXTRACT_TYPE,
cfg->extracts[i].type);
- if (extr->num_of_byte_masks > DPKG_NUM_OF_MASKS)
- return -EINVAL;
-
- for (j = 0; j < extr->num_of_byte_masks; j++) {
+ for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
extr->masks[j].offset =
cfg->extracts[i].masks[j].offset;
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index 60048d6c43..cf78295d90 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -128,6 +128,7 @@ int dpni_create(struct fsl_mc_io *mc_io,
cmd_params->num_cgs = cfg->num_cgs;
cmd_params->num_opr = cfg->num_opr;
cmd_params->dist_key_size = cfg->dist_key_size;
+ cmd_params->num_channels = cfg->num_channels;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -203,7 +204,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_io,
cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
cmd_params->num_dpbp = cfg->num_dpbp;
cmd_params->pool_options = cfg->pool_options;
- for (i = 0; i < cmd_params->num_dpbp; i++) {
+ for (i = 0; i < DPNI_MAX_DPBP; i++) {
cmd_params->pool[i].dpbp_id =
cpu_to_le16(cfg->pools[i].dpbp_id);
cmd_params->pool[i].priority_mask =
@@ -592,6 +593,7 @@ int dpni_get_attributes(struct fsl_mc_io *mc_io,
attr->num_tx_tcs = rsp_params->num_tx_tcs;
attr->mac_filter_entries = rsp_params->mac_filter_entries;
attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
+ attr->num_channels = rsp_params->num_channels;
attr->qos_entries = rsp_params->qos_entries;
attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
attr->qos_key_size = rsp_params->qos_key_size;
@@ -815,6 +817,9 @@ int dpni_get_offload(struct fsl_mc_io *mc_io,
* in all enqueue operations
*
* Return: '0' on Success; Error code otherwise.
+ *
+ * If dpni object is created using multiple Tc channels this function will return
+ * qdid value for the first channel
*/
int dpni_get_qdid(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
@@ -958,7 +963,12 @@ int dpni_get_link_state(struct fsl_mc_io *mc_io,
* @token: Token of DPNI object
* @tx_cr_shaper: TX committed rate shaping configuration
* @tx_er_shaper: TX excess rate shaping configuration
- * @coupled: Committed and excess rate shapers are coupled
+ * @param: Special parameters
+ * bit0: Committed and excess rates are coupled
+ * bit1: 1 modify LNI shaper, 0 modify channel shaper
+ * bit8-15: Tx channel to be shaped. Used only if bit1 is set to zero
+ * bits16-26: OAL (Overhead accounting length 11bit value). Used only
+ * when bit1 is set.
*
* Return: '0' on Success; Error code otherwise.
*/
@@ -967,10 +977,13 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
uint16_t token,
const struct dpni_tx_shaping_cfg *tx_cr_shaper,
const struct dpni_tx_shaping_cfg *tx_er_shaper,
- int coupled)
+ uint32_t param)
{
struct dpni_cmd_set_tx_shaping *cmd_params;
struct mc_command cmd = { 0 };
+ int coupled, lni_shaper;
+ uint8_t channel_id;
+ uint16_t oal;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
@@ -985,7 +998,18 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
cpu_to_le32(tx_cr_shaper->rate_limit);
cmd_params->tx_er_rate_limit =
cpu_to_le32(tx_er_shaper->rate_limit);
- dpni_set_field(cmd_params->coupled, COUPLED, coupled);
+
+ coupled = !!(param & 0x01);
+ dpni_set_field(cmd_params->options, COUPLED, coupled);
+
+ lni_shaper = !!((param >> 1) & 0x01);
+ dpni_set_field(cmd_params->options, LNI_SHAPER, lni_shaper);
+
+ channel_id = (param >> 8) & 0xff;
+ cmd_params->channel_id = channel_id;
+
+ oal = (param >> 16) & 0x7FF;
+ cmd_params->oal = cpu_to_le16(oal);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -1543,6 +1567,7 @@ int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
cmd_flags,
token);
cmd_params = (struct dpni_cmd_set_tx_priorities *)cmd.params;
+ cmd_params->channel_idx = cfg->channel_idx;
dpni_set_field(cmd_params->flags,
SEPARATE_GRP,
cfg->separate_groups);
@@ -2053,7 +2078,13 @@ void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg,
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @qtype: Type of queue - only Rx and Tx types are supported
- * @tc_id: Traffic class selection (0-7)
+ * @param: Traffic class and channel ID.
+ * MSB - channel id; used only for DPNI_QUEUE_TX and DPNI_QUEUE_TX_CONFIRM,
+ * ignored for the rest
+ * LSB - traffic class
+ * Use macro DPNI_BUILD_PARAM() to build correct value.
+ * If dpni uses a single channel (uses only channel zero) the parameter can receive
+ * traffic class directly.
* @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled
* with the early-drop configuration by calling dpni_prepare_early_drop()
*
@@ -2066,7 +2097,7 @@ int dpni_set_early_drop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
+ uint16_t param,
uint64_t early_drop_iova)
{
struct dpni_cmd_early_drop *cmd_params;
@@ -2078,7 +2109,8 @@ int dpni_set_early_drop(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_early_drop *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc_id;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
/* send command to mc*/
@@ -2091,7 +2123,13 @@ int dpni_set_early_drop(struct fsl_mc_io *mc_io,
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @qtype: Type of queue - only Rx and Tx types are supported
- * @tc_id: Traffic class selection (0-7)
+ * @param: Traffic class and channel ID.
+ * MSB - channel id; used only for DPNI_QUEUE_TX and DPNI_QUEUE_TX_CONFIRM,
+ * ignored for the rest
+ * LSB - traffic class
+ * Use macro DPNI_BUILD_PARAM() to build correct value.
+ * If dpni uses a single channel (uses only channel zero) the parameter can receive
+ * traffic class directly.
* @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory
*
* warning: After calling this function, call dpni_extract_early_drop() to
@@ -2103,7 +2141,7 @@ int dpni_get_early_drop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
+ uint16_t param,
uint64_t early_drop_iova)
{
struct dpni_cmd_early_drop *cmd_params;
@@ -2115,7 +2153,8 @@ int dpni_get_early_drop(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_early_drop *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc_id;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
/* send command to mc*/
@@ -2138,8 +2177,8 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
- const struct dpni_congestion_notification_cfg *cfg)
+ uint16_t param,
+ const struct dpni_congestion_notification_cfg *cfg)
{
struct dpni_cmd_set_congestion_notification *cmd_params;
struct mc_command cmd = { 0 };
@@ -2151,7 +2190,8 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc_id;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->congestion_point = cfg->cg_point;
cmd_params->cgid = (uint8_t)cfg->cgid;
cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
@@ -2179,7 +2219,8 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
- * @tc_id: Traffic class selection (0-7)
+ * @param: Traffic class and channel. Bits[0-7] contain traaffic class,
+ * byte[8-15] contains channel id
* @cfg: congestion notification configuration
*
* Return: '0' on Success; error code otherwise.
@@ -2188,8 +2229,8 @@ int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
- struct dpni_congestion_notification_cfg *cfg)
+ uint16_t param,
+ struct dpni_congestion_notification_cfg *cfg)
{
struct dpni_rsp_get_congestion_notification *rsp_params;
struct dpni_cmd_get_congestion_notification *cmd_params;
@@ -2203,7 +2244,8 @@ int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc_id;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->congestion_point = cfg->cg_point;
cmd_params->cgid = cfg->cgid;
@@ -2280,7 +2322,7 @@ int dpni_set_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc,
+ uint16_t param,
uint8_t index,
uint8_t options,
const struct dpni_queue *queue)
@@ -2294,7 +2336,8 @@ int dpni_set_queue(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->index = index;
cmd_params->options = options;
cmd_params->dest_id = cpu_to_le32(queue->destination.id);
@@ -2317,7 +2360,13 @@ int dpni_set_queue(struct fsl_mc_io *mc_io,
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @qtype: Type of queue - all queue types are supported
- * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @param: Traffic class and channel ID.
+ * MSB - channel id; used only for DPNI_QUEUE_TX and DPNI_QUEUE_TX_CONFIRM,
+ * ignored for the rest
+ * LSB - traffic class
+ * Use macro DPNI_BUILD_PARAM() to build correct value.
+ * If dpni uses a single channel (uses only channel zero) the parameter can receive
+ * traffic class directly.
* @index: Selects the specific queue out of the set allocated for the
* same TC. Value must be in range 0 to NUM_QUEUES - 1
* @queue: Queue configuration structure
@@ -2329,7 +2378,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc,
+ uint16_t param,
uint8_t index,
struct dpni_queue *queue,
struct dpni_queue_id *qid)
@@ -2345,8 +2394,9 @@ int dpni_get_queue(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc;
+ cmd_params->tc = (uint8_t)(param & 0xff);
cmd_params->index = index;
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
/* send command to mc */
err = mc_send_command(mc_io, &cmd);
@@ -2382,8 +2432,16 @@ int dpni_get_queue(struct fsl_mc_io *mc_io,
* @token: Token of DPNI object
* @page: Selects the statistics page to retrieve, see
* DPNI_GET_STATISTICS output. Pages are numbered 0 to 6.
- * @param: Custom parameter for some pages used to select
- * a certain statistic source, for example the TC.
+ * @param: Custom parameter for some pages used to select
+ * a certain statistic source, for example the TC.
+ * - page_0: not used
+ * - page_1: not used
+ * - page_2: not used
+ * - page_3: high_byte - channel_id, low_byte - traffic class
+ * - page_4: high_byte - queue_index have meaning only if dpni is
+ * created using option DPNI_OPT_CUSTOM_CG, low_byte - traffic class
+ * - page_5: not used
+ * - page_6: not used
* @stat: Structure containing the statistics
*
* Return: '0' on Success; Error code otherwise.
@@ -2471,7 +2529,7 @@ int dpni_set_taildrop(struct fsl_mc_io *mc_io,
uint16_t token,
enum dpni_congestion_point cg_point,
enum dpni_queue_type qtype,
- uint8_t tc,
+ uint16_t param,
uint8_t index,
struct dpni_taildrop *taildrop)
{
@@ -2485,7 +2543,8 @@ int dpni_set_taildrop(struct fsl_mc_io *mc_io,
cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
cmd_params->congestion_point = cg_point;
cmd_params->qtype = qtype;
- cmd_params->tc = tc;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->index = index;
cmd_params->units = taildrop->units;
cmd_params->threshold = cpu_to_le32(taildrop->threshold);
diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux.h b/drivers/net/dpaa2/mc/fsl_dpdmux.h
index b01a98eb59..4600ea94d4 100644
--- a/drivers/net/dpaa2/mc/fsl_dpdmux.h
+++ b/drivers/net/dpaa2/mc/fsl_dpdmux.h
@@ -184,6 +184,9 @@ struct dpdmux_attr {
uint16_t num_ifs;
uint16_t mem_size;
uint16_t default_if;
+ uint16_t max_dmat_entries;
+ uint16_t max_mc_groups;
+ uint16_t max_vlan_ids;
};
int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h
index f8a1b5b1ae..bf6b8a20d1 100644
--- a/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h
@@ -35,7 +35,7 @@
#define DPDMUX_CMDID_ENABLE DPDMUX_CMD(0x002)
#define DPDMUX_CMDID_DISABLE DPDMUX_CMD(0x003)
-#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD_V2(0x004)
+#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD_V3(0x004)
#define DPDMUX_CMDID_RESET DPDMUX_CMD(0x005)
#define DPDMUX_CMDID_IS_ENABLED DPDMUX_CMD(0x006)
#define DPDMUX_CMDID_SET_MAX_FRAME_LENGTH DPDMUX_CMD(0x0a1)
@@ -119,6 +119,9 @@ struct dpdmux_rsp_get_attr {
uint32_t pad2;
uint64_t options;
+ uint16_t max_dmat_entries;
+ uint16_t max_mc_groups;
+ uint16_t max_vlan_ids;
};
struct dpdmux_cmd_set_max_frame_length {
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
index 469ab9b3d4..8aead28261 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -36,6 +36,10 @@ struct fsl_mc_io;
* Maximum number of storage-profiles per DPNI
*/
#define DPNI_MAX_SP 2
+/**
+ * Maximum number of Tx channels per DPNI
+ */
+#define DPNI_MAX_CHANNELS 16
/**
* All traffic classes considered; see dpni_set_queue()
@@ -117,6 +121,13 @@ struct fsl_mc_io;
*/
#define DPNI_SW_SEQUENCE_LAYOUT_SIZE 33
+/**
+ * Build a parameter from dpni channel and trafiic class. This parameter
+ * will be used to configure / query information from dpni objects created
+ * to support multiple channels.
+ */
+#define DPNI_BUILD_PARAM(channel, tc_id) (((channel) << 8) | (tc_id))
+
int dpni_open(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
int dpni_id,
@@ -187,6 +198,8 @@ int dpni_close(struct fsl_mc_io *mc_io,
* field is ignored if the DPNI has a single TC. Otherwise,
* a value of 0 defaults to 64. Maximum supported value
* is 64.
+ * @num_channels: Number of egress channels used by this dpni object. If
+ * set to zero the dpni object will use a single CEETM channel.
*/
struct dpni_cfg {
uint32_t options;
@@ -200,6 +213,7 @@ struct dpni_cfg {
uint8_t num_cgs;
uint16_t num_opr;
uint8_t dist_key_size;
+ uint8_t num_channels;
};
int dpni_create(struct fsl_mc_io *mc_io,
@@ -362,6 +376,7 @@ struct dpni_attr {
uint8_t fs_key_size;
uint16_t wriop_version;
uint8_t num_cgs;
+ uint8_t num_channels;
};
int dpni_get_attributes(struct fsl_mc_io *mc_io,
@@ -779,12 +794,29 @@ struct dpni_tx_shaping_cfg {
uint16_t max_burst_size;
};
+/**
+ * Build the parameter for dpni_set_tx_shaping() call
+ * @oal: Overhead accounting length. 11bit value added to the size of
+ * each frame. Used only for LNI shaping. If set to zero, will use default
+ * value of 24. Ignored if shaping_lni is set to zero.
+ * @shaping_lni: 1 for LNI shaping (configure whole throughput of the dpni object)
+ * 0 for channel shaping (configure shaping for individual channels)
+ * Set to one only if dpni is connected to a dpmac object.
+ * @channel_id: Channel to be configured. Ignored shaping_lni is set to 1
+ * @coupled: Committed and excess rates are coupled
+ */
+#define DPNI_TX_SHAPING_PARAM(oal, shaping_lni, channel_id, coupled) ( \
+ ((uint32_t)(((oal) & 0x7ff) << 16)) | \
+ ((uint32_t)((channel_id) & 0xff) << 8) | \
+ ((uint32_t)(!!shaping_lni) << 1) | \
+ ((uint32_t)!!coupled))
+
int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
const struct dpni_tx_shaping_cfg *tx_cr_shaper,
const struct dpni_tx_shaping_cfg *tx_er_shaper,
- int coupled);
+ uint32_t param);
int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
@@ -918,12 +950,14 @@ struct dpni_tx_schedule_cfg {
/**
* struct dpni_tx_priorities_cfg - Structure representing transmission
* priorities for DPNI TCs
+ * @channel_idx: channel to perform the configuration
* @tc_sched: An array of traffic-classes
* @prio_group_A: Priority of group A
* @prio_group_B: Priority of group B
* @separate_groups: Treat A and B groups as separate
*/
struct dpni_tx_priorities_cfg {
+ uint8_t channel_idx;
struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
uint32_t prio_group_A;
uint32_t prio_group_B;
@@ -1155,14 +1189,14 @@ int dpni_set_early_drop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
+ uint16_t param,
uint64_t early_drop_iova);
int dpni_get_early_drop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
+ uint16_t param,
uint64_t early_drop_iova);
/**
@@ -1290,15 +1324,15 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
- const struct dpni_congestion_notification_cfg *cfg);
+ uint16_t param,
+ const struct dpni_congestion_notification_cfg *cfg);
int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
- struct dpni_congestion_notification_cfg *cfg);
+ uint16_t param,
+ struct dpni_congestion_notification_cfg *cfg);
/* DPNI FLC stash options */
@@ -1590,7 +1624,7 @@ int dpni_set_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc,
+ uint16_t param,
uint8_t index,
uint8_t options,
const struct dpni_queue *queue);
@@ -1599,7 +1633,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc,
+ uint16_t param,
uint8_t index,
struct dpni_queue *queue,
struct dpni_queue_id *qid);
@@ -1643,7 +1677,7 @@ int dpni_set_taildrop(struct fsl_mc_io *mc_io,
uint16_t token,
enum dpni_congestion_point cg_point,
enum dpni_queue_type q_type,
- uint8_t tc,
+ uint16_t param,
uint8_t q_index,
struct dpni_taildrop *taildrop);
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index 6fbd93bb38..8bff2ec9af 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -8,14 +8,15 @@
#define _FSL_DPNI_CMD_H
/* DPNI Version */
-#define DPNI_VER_MAJOR 7
-#define DPNI_VER_MINOR 17
+#define DPNI_VER_MAJOR 8
+#define DPNI_VER_MINOR 0
#define DPNI_CMD_BASE_VERSION 1
#define DPNI_CMD_VERSION_2 2
#define DPNI_CMD_VERSION_3 3
#define DPNI_CMD_VERSION_4 4
#define DPNI_CMD_VERSION_5 5
+#define DPNI_CMD_VERSION_6 6
#define DPNI_CMD_ID_OFFSET 4
#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
@@ -23,17 +24,18 @@
#define DPNI_CMD_V3(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_3)
#define DPNI_CMD_V4(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_4)
#define DPNI_CMD_V5(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_5)
+#define DPNI_CMD_V6(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_6)
/* Command IDs */
#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
-#define DPNI_CMDID_CREATE DPNI_CMD_V5(0x901)
+#define DPNI_CMDID_CREATE DPNI_CMD_V6(0x901)
#define DPNI_CMDID_DESTROY DPNI_CMD(0x981)
#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
-#define DPNI_CMDID_GET_ATTR DPNI_CMD_V3(0x004)
+#define DPNI_CMDID_GET_ATTR DPNI_CMD_V4(0x004)
#define DPNI_CMDID_RESET DPNI_CMD(0x005)
#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
@@ -54,7 +56,7 @@
#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD_V2(0x21A)
-#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)
+#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V3(0x21B)
#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
@@ -83,25 +85,25 @@
#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
-#define DPNI_CMDID_SET_TX_PRIORITIES DPNI_CMD_V2(0x250)
+#define DPNI_CMDID_SET_TX_PRIORITIES DPNI_CMD_V3(0x250)
#define DPNI_CMDID_GET_RX_TC_POLICING DPNI_CMD(0x251)
-#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V3(0x25D)
+#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V4(0x25D)
#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
-#define DPNI_CMDID_GET_QUEUE DPNI_CMD_V2(0x25F)
-#define DPNI_CMDID_SET_QUEUE DPNI_CMD_V2(0x260)
+#define DPNI_CMDID_GET_QUEUE DPNI_CMD_V3(0x25F)
+#define DPNI_CMDID_SET_QUEUE DPNI_CMD_V3(0x260)
#define DPNI_CMDID_GET_TAILDROP DPNI_CMD_V2(0x261)
-#define DPNI_CMDID_SET_TAILDROP DPNI_CMD_V2(0x262)
+#define DPNI_CMDID_SET_TAILDROP DPNI_CMD_V3(0x262)
#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD_V2(0x264)
#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD_V2(0x265)
-#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD_V2(0x267)
-#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD_V2(0x268)
-#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD_V2(0x269)
-#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD_V2(0x26A)
+#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD_V3(0x267)
+#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD_V3(0x268)
+#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD_V3(0x269)
+#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD_V3(0x26A)
#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
@@ -136,7 +138,7 @@ struct dpni_cmd_create {
uint8_t num_queues;
uint8_t num_tcs;
uint8_t mac_filter_entries;
- uint8_t pad1;
+ uint8_t num_channels;
uint8_t vlan_filter_entries;
uint8_t pad2;
uint8_t qos_entries;
@@ -230,7 +232,7 @@ struct dpni_rsp_get_attr {
uint8_t num_tx_tcs;
/* response word 1 */
uint8_t vlan_filter_entries;
- uint8_t pad1;
+ uint8_t num_channels;
uint8_t qos_entries;
uint8_t pad2;
uint16_t fs_entries;
@@ -367,6 +369,8 @@ struct dpni_rsp_get_link_state {
#define DPNI_COUPLED_SHIFT 0
#define DPNI_COUPLED_SIZE 1
+#define DPNI_LNI_SHAPER_SHIFT 1
+#define DPNI_LNI_SHAPER_SIZE 1
struct dpni_cmd_set_tx_shaping {
uint16_t tx_cr_max_burst_size;
@@ -374,8 +378,10 @@ struct dpni_cmd_set_tx_shaping {
uint32_t pad;
uint32_t tx_cr_rate_limit;
uint32_t tx_er_rate_limit;
- /* from LSB: coupled:1 */
- uint8_t coupled;
+ /* from LSB: coupled:1, lni_shaper: 1*/
+ uint8_t options;
+ uint8_t channel_id;
+ uint16_t oal;
};
struct dpni_cmd_set_max_frame_length {
@@ -466,7 +472,8 @@ struct dpni_cmd_set_tx_priorities {
uint16_t flags;
uint8_t prio_group_A;
uint8_t prio_group_B;
- uint32_t pad0;
+ uint8_t channel_idx;
+ uint8_t pad0[3];
uint8_t modes[4];
uint32_t pad1;
uint64_t pad2;
@@ -499,6 +506,7 @@ struct dpni_cmd_get_queue {
uint8_t qtype;
uint8_t tc;
uint8_t index;
+ uint8_t channel_id;
};
#define DPNI_DEST_TYPE_SHIFT 0
@@ -551,6 +559,7 @@ struct dpni_cmd_set_queue {
uint64_t user_context;
/* cmd word 4 */
uint8_t cgid;
+ uint8_t channel_id;
};
#define DPNI_DISCARD_ON_MISS_SHIFT 0
@@ -683,7 +692,8 @@ struct dpni_early_drop {
struct dpni_cmd_early_drop {
uint8_t qtype;
uint8_t tc;
- uint8_t pad[6];
+ uint8_t channel_id;
+ uint8_t pad[5];
uint64_t early_drop_iova;
};
@@ -723,7 +733,8 @@ struct dpni_cmd_set_taildrop {
uint8_t qtype;
uint8_t tc;
uint8_t index;
- uint32_t pad0;
+ uint8_t channel_id;
+ uint8_t pad0[3];
/* cmd word 1 */
/* from LSB: enable:1 oal_lo:7 */
uint8_t enable_oal_lo;
@@ -747,7 +758,7 @@ struct dpni_tx_confirmation_mode {
struct dpni_cmd_set_congestion_notification {
uint8_t qtype;
uint8_t tc;
- uint8_t pad;
+ uint8_t channel_id;
uint8_t congestion_point;
uint8_t cgid;
uint8_t pad2[3];
@@ -765,7 +776,7 @@ struct dpni_cmd_set_congestion_notification {
struct dpni_cmd_get_congestion_notification {
uint8_t qtype;
uint8_t tc;
- uint8_t pad;
+ uint8_t channel_id;
uint8_t congestion_point;
uint8_t cgid;
};
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 02/16] bus/fslmc: use dmb oshst for synchronization before I/O
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
2021-12-27 16:16 ` [PATCH v2 01/16] bus/fslmc: update MC to 10.29 nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 16:16 ` [PATCH v2 03/16] net/dpaa2: warn user in case of high nb desc nipun.gupta
` (14 subsequent siblings)
16 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Nipun Gupta
From: Nipun Gupta <nipun.gupta@nxp.com>
Outer Shareable Store (oshst) is sufficient for Data Memory
Barrier (dmb) when doing IO on the interface via QBMAN.
This will sync L3/DDR with the L1/L2 cached data.
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/bus/fslmc/qbman/include/compat.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/bus/fslmc/qbman/include/compat.h b/drivers/bus/fslmc/qbman/include/compat.h
index a4471a80af..ece5da5906 100644
--- a/drivers/bus/fslmc/qbman/include/compat.h
+++ b/drivers/bus/fslmc/qbman/include/compat.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2008-2016 Freescale Semiconductor, Inc.
- * Copyright 2017 NXP
+ * Copyright 2017,2021 NXP
*
*/
@@ -81,7 +81,7 @@ do { \
#define __raw_readl(p) (*(const volatile unsigned int *)(p))
#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
-#define dma_wmb() rte_smp_mb()
+#define dma_wmb() rte_io_wmb()
#define atomic_t rte_atomic32_t
#define atomic_read(v) rte_atomic32_read(v)
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 03/16] net/dpaa2: warn user in case of high nb desc
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
2021-12-27 16:16 ` [PATCH v2 01/16] bus/fslmc: update MC to 10.29 nipun.gupta
2021-12-27 16:16 ` [PATCH v2 02/16] bus/fslmc: use dmb oshst for synchronization before I/O nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 16:16 ` [PATCH v2 04/16] net/dpaa2: fix unregistering interrupt handler nipun.gupta
` (13 subsequent siblings)
16 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Rohit Raj
From: Rohit Raj <rohit.raj@nxp.com>
Added a warning message that is printed if the application configures
nb_desc higher than what the PEB memory supports, suggesting that the
user configure the HW descriptors in normal memory rather than in the
faster PEB memory.
Signed-off-by: Rohit Raj <rohit.raj@nxp.com>
---
drivers/net/dpaa2/dpaa2_ethdev.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index a3706439d5..f5cac8f9d9 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -74,6 +74,9 @@ int dpaa2_timestamp_dynfield_offset = -1;
/* Enable error queue */
bool dpaa2_enable_err_queue;
+#define MAX_NB_RX_DESC 11264
+int total_nb_rx_desc;
+
struct rte_dpaa2_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
uint8_t page_id; /* dpni statistics page id */
@@ -694,6 +697,13 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
dev, rx_queue_id, mb_pool, rx_conf);
+ total_nb_rx_desc += nb_rx_desc;
+ if (total_nb_rx_desc > MAX_NB_RX_DESC) {
+ DPAA2_PMD_WARN("\nTotal nb_rx_desc exceeds %d limit. Please use Normal buffers",
+ MAX_NB_RX_DESC);
+ DPAA2_PMD_WARN("To use Normal buffers, run 'export DPNI_NORMAL_BUF=1' before running dynamic_dpl.sh script");
+ }
+
/* Rx deferred start is not supported */
if (rx_conf->rx_deferred_start) {
DPAA2_PMD_ERR("%p:Rx deferred start not supported",
@@ -984,6 +994,9 @@ dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
memset(&cfg, 0, sizeof(struct dpni_queue));
PMD_INIT_FUNC_TRACE();
+
+ total_nb_rx_desc -= dpaa2_q->nb_desc;
+
if (dpaa2_q->cgid != 0xff) {
options = DPNI_QUEUE_OPT_CLEAR_CGID;
cfg.cgid = dpaa2_q->cgid;
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 04/16] net/dpaa2: fix unregistering interrupt handler
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
` (2 preceding siblings ...)
2021-12-27 16:16 ` [PATCH v2 03/16] net/dpaa2: warn user in case of high nb desc nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 16:16 ` [PATCH v2 05/16] net/dpaa2: fix timestamping for IEEE1588 nipun.gupta
` (12 subsequent siblings)
16 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stable, Vanshika Shukla
From: Vanshika Shukla <vanshika.shukla@nxp.com>
This patch fixes the code that unregisters the LSC
interrupt handler in the dpaa2_dev_stop API.
Fixes: c5acbb5ea20e ("net/dpaa2: support link status event")
Cc: stable@dpdk.org
Signed-off-by: Vanshika Shukla <vanshika.shukla@nxp.com>
---
drivers/net/dpaa2/dpaa2_ethdev.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index f5cac8f9d9..18ff07249f 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -1265,7 +1265,12 @@ dpaa2_dev_stop(struct rte_eth_dev *dev)
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
int ret;
struct rte_eth_link link;
- struct rte_intr_handle *intr_handle = dev->intr_handle;
+ struct rte_device *rdev = dev->device;
+ struct rte_intr_handle *intr_handle;
+ struct rte_dpaa2_device *dpaa2_dev;
+
+ dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
+ intr_handle = dpaa2_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 05/16] net/dpaa2: fix timestamping for IEEE1588
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
` (3 preceding siblings ...)
2021-12-27 16:16 ` [PATCH v2 04/16] net/dpaa2: fix unregistering interrupt handler nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 16:16 ` [PATCH v2 06/16] net/dpaa2: support multiple txqs en-queue for ordered nipun.gupta
` (11 subsequent siblings)
16 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stable, Vanshika Shukla
From: Vanshika Shukla <vanshika.shukla@nxp.com>
The current implementation of the DPAA2 driver records Rx and
Tx timestamps for PTP without checking whether the packets are
actually PTP packets. For packets in which
RTE_MBUF_F_RX_IEEE1588_TMST and RTE_MBUF_F_TX_IEEE1588_TMST
are not set, the Rx and Tx timestamps should not be recorded.
This patch fixes the issue by checking that the required
flags are set in the mbuf before recording the timestamps.
Also this change defines separate values for
DPAA2_TX_CONF_ENABLE and DPAA2_NO_PREFETCH_RX
Fixes: e806bf878c17 ("net/dpaa2: support timestamp")
Cc: stable@dpdk.org
Signed-off-by: Vanshika Shukla <vanshika.shukla@nxp.com>
---
drivers/net/dpaa2/dpaa2_ethdev.h | 2 +-
drivers/net/dpaa2/dpaa2_ptp.c | 8 ++++---
drivers/net/dpaa2/dpaa2_rxtx.c | 39 +++++++++++++++++++++++++-------
3 files changed, 37 insertions(+), 12 deletions(-)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index c5e9267bf0..c21571e63d 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -62,7 +62,7 @@
/* Disable RX tail drop, default is enable */
#define DPAA2_RX_TAILDROP_OFF 0x04
/* Tx confirmation enabled */
-#define DPAA2_TX_CONF_ENABLE 0x08
+#define DPAA2_TX_CONF_ENABLE 0x06
#define DPAA2_RSS_OFFLOAD_ALL ( \
RTE_ETH_RSS_L2_PAYLOAD | \
diff --git a/drivers/net/dpaa2/dpaa2_ptp.c b/drivers/net/dpaa2/dpaa2_ptp.c
index 8d79e39244..3a4536dd69 100644
--- a/drivers/net/dpaa2/dpaa2_ptp.c
+++ b/drivers/net/dpaa2/dpaa2_ptp.c
@@ -111,10 +111,12 @@ int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- if (priv->next_tx_conf_queue)
- dpaa2_dev_tx_conf(priv->next_tx_conf_queue);
- else
+ if (priv->next_tx_conf_queue) {
+ while (!priv->tx_timestamp)
+ dpaa2_dev_tx_conf(priv->next_tx_conf_queue);
+ } else {
return -1;
+ }
*timestamp = rte_ns_to_timespec(priv->tx_timestamp);
return 0;
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index c65589a5f3..ee3ed1b152 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -140,8 +140,10 @@ dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
annotation->word3, annotation->word4);
#if defined(RTE_LIBRTE_IEEE1588)
- if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
+ if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) {
mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
+ }
#endif
if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
@@ -769,7 +771,10 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
else
bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
#if defined(RTE_LIBRTE_IEEE1588)
- priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
+ if (bufs[num_rx]->ol_flags & PKT_RX_IEEE1588_TMST) {
+ priv->rx_timestamp =
+ *dpaa2_timestamp_dynfield(bufs[num_rx]);
+ }
#endif
if (eth_data->dev_conf.rxmode.offloads &
@@ -986,6 +991,13 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
bufs[num_rx] = eth_fd_to_mbuf(fd,
eth_data->port_id);
+#if defined(RTE_LIBRTE_IEEE1588)
+ if (bufs[num_rx]->ol_flags & PKT_RX_IEEE1588_TMST) {
+ priv->rx_timestamp =
+ *dpaa2_timestamp_dynfield(bufs[num_rx]);
+ }
+#endif
+
if (eth_data->dev_conf.rxmode.offloads &
RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
rte_vlan_strip(bufs[num_rx]);
@@ -1021,6 +1033,8 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
struct dpaa2_dev_priv *priv = eth_data->dev_private;
struct dpaa2_annot_hdr *annotation;
+ void *v_addr;
+ struct rte_mbuf *mbuf;
#endif
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
@@ -1105,10 +1119,16 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
num_tx_conf++;
num_pulled++;
#if defined(RTE_LIBRTE_IEEE1588)
- annotation = (struct dpaa2_annot_hdr *)((size_t)
- DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
- DPAA2_FD_PTA_SIZE);
- priv->tx_timestamp = annotation->word2;
+ v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+ mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+
+ if (mbuf->ol_flags & PKT_TX_IEEE1588_TMST) {
+ annotation = (struct dpaa2_annot_hdr *)((size_t)
+ DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
+ DPAA2_FD_PTA_SIZE);
+ priv->tx_timestamp = annotation->word2;
+ }
#endif
} while (pending);
@@ -1184,8 +1204,11 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
* corresponding to last packet transmitted for reading
* the timestamp
*/
- priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
- dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
+ if ((*bufs)->ol_flags & PKT_TX_IEEE1588_TMST) {
+ priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
+ dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
+ priv->tx_timestamp = 0;
+ }
#endif
/*Prepare enqueue descriptor*/
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 06/16] net/dpaa2: support multiple txqs en-queue for ordered
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
` (4 preceding siblings ...)
2021-12-27 16:16 ` [PATCH v2 05/16] net/dpaa2: fix timestamping for IEEE1588 nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 18:01 ` Stephen Hemminger
2021-12-27 16:16 ` [PATCH v2 07/16] net/dpaa2: add support for level 2 in traffic management nipun.gupta
` (10 subsequent siblings)
16 siblings, 1 reply; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Jun Yang
From: Jun Yang <jun.yang@nxp.com>
Support Tx enqueue in ordered queue mode, where the queue id
for each event may be different.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
drivers/event/dpaa2/dpaa2_eventdev.c | 12 ++-
drivers/net/dpaa2/dpaa2_ethdev.h | 4 +
drivers/net/dpaa2/dpaa2_rxtx.c | 142 +++++++++++++++++++++++++++
drivers/net/dpaa2/version.map | 1 +
4 files changed, 155 insertions(+), 4 deletions(-)
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 4d94c315d2..f3d8a7e4f1 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017,2019 NXP
+ * Copyright 2017,2019-2021 NXP
*/
#include <assert.h>
@@ -1003,16 +1003,20 @@ dpaa2_eventdev_txa_enqueue(void *port,
struct rte_event ev[],
uint16_t nb_events)
{
- struct rte_mbuf *m = (struct rte_mbuf *)ev[0].mbuf;
+ void *txq[32];
+ struct rte_mbuf *m[32];
uint8_t qid, i;
RTE_SET_USED(port);
for (i = 0; i < nb_events; i++) {
- qid = rte_event_eth_tx_adapter_txq_get(m);
- rte_eth_tx_burst(m->port, qid, &m, 1);
+ m[i] = (struct rte_mbuf *)ev[i].mbuf;
+ qid = rte_event_eth_tx_adapter_txq_get(m[i]);
+ txq[i] = rte_eth_devices[m[i]->port].data->tx_queues[qid];
}
+ dpaa2_dev_tx_multi_txq_ordered(txq, m, nb_events);
+
return nb_events;
}
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index c21571e63d..e001a7e49d 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -241,6 +241,10 @@ void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts);
+__rte_internal
+uint16_t dpaa2_dev_tx_multi_txq_ordered(void **queue,
+ struct rte_mbuf **bufs, uint16_t nb_pkts);
+
uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
void dpaa2_flow_clean(struct rte_eth_dev *dev);
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index ee3ed1b152..1096b1cf1d 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -1468,6 +1468,148 @@ dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
}
+uint16_t
+dpaa2_dev_tx_multi_txq_ordered(void **queue,
+ struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ /* Function to transmit the frames to multiple queues respectively.*/
+ uint32_t loop, retry_count;
+ int32_t ret;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t frames_to_send;
+ struct rte_mempool *mp;
+ struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+ struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
+ struct qbman_swp *swp;
+ uint16_t bpid;
+ struct rte_mbuf *mi;
+ struct rte_eth_dev_data *eth_data;
+ struct dpaa2_dev_priv *priv;
+ struct dpaa2_queue *order_sendq;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ for (loop = 0; loop < nb_pkts; loop++) {
+ dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
+ eth_data = dpaa2_q[loop]->eth_data;
+ priv = eth_data->dev_private;
+ qbman_eq_desc_clear(&eqdesc[loop]);
+ if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
+ order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
+ dpaa2_set_enqueue_descriptor(order_sendq,
+ (*bufs),
+ &eqdesc[loop]);
+ } else {
+ qbman_eq_desc_set_no_orp(&eqdesc[loop],
+ DPAA2_EQ_RESP_ERR_FQ);
+ qbman_eq_desc_set_fq(&eqdesc[loop],
+ dpaa2_q[loop]->fqid);
+ }
+
+ retry_count = 0;
+ while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
+ retry_count++;
+ /* Retry for some time before giving up */
+ if (retry_count > CONG_RETRY_COUNT)
+ goto send_frames;
+ }
+
+ if (likely(RTE_MBUF_DIRECT(*bufs))) {
+ mp = (*bufs)->pool;
+ /* Check the basic scenario and set
+ * the FD appropriately here itself.
+ */
+ if (likely(mp && mp->ops_index ==
+ priv->bp_list->dpaa2_ops_index &&
+ (*bufs)->nb_segs == 1 &&
+ rte_mbuf_refcnt_read((*bufs)) == 1)) {
+ if (unlikely((*bufs)->ol_flags
+ & RTE_MBUF_F_TX_VLAN)) {
+ ret = rte_vlan_insert(bufs);
+ if (ret)
+ goto send_frames;
+ }
+ DPAA2_MBUF_TO_CONTIG_FD((*bufs),
+ &fd_arr[loop],
+ mempool_to_bpid(mp));
+ bufs++;
+ dpaa2_q[loop]++;
+ continue;
+ }
+ } else {
+ mi = rte_mbuf_from_indirect(*bufs);
+ mp = mi->pool;
+ }
+ /* Not a hw_pkt pool allocated frame */
+ if (unlikely(!mp || !priv->bp_list)) {
+ DPAA2_PMD_ERR("Err: No buffer pool attached");
+ goto send_frames;
+ }
+
+ if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
+ DPAA2_PMD_WARN("Non DPAA2 buffer pool");
+ /* alloc should be from the default buffer pool
+ * attached to this interface
+ */
+ bpid = priv->bp_list->buf_pool.bpid;
+
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ DPAA2_PMD_ERR(
+ "S/G not supp for non hw offload buffer");
+ goto send_frames;
+ }
+ if (eth_copy_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid)) {
+ goto send_frames;
+ }
+ /* free the original packet */
+ rte_pktmbuf_free(*bufs);
+ } else {
+ bpid = mempool_to_bpid(mp);
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ if (eth_mbuf_to_sg_fd(*bufs,
+ &fd_arr[loop],
+ mp,
+ bpid))
+ goto send_frames;
+ } else {
+ eth_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid);
+ }
+ }
+
+ bufs++;
+ dpaa2_q[loop]++;
+ }
+
+send_frames:
+ frames_to_send = loop;
+ loop = 0;
+ while (loop < frames_to_send) {
+ ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
+ &fd_arr[loop],
+ frames_to_send - loop);
+ if (likely(ret > 0)) {
+ loop += ret;
+ } else {
+ retry_count++;
+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
+ break;
+ }
+ }
+
+ return loop;
+}
+
/* Callback to handle sending ordered packets through WRIOP based interface */
uint16_t
dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
diff --git a/drivers/net/dpaa2/version.map b/drivers/net/dpaa2/version.map
index 2fe61f3442..cc82b8579d 100644
--- a/drivers/net/dpaa2/version.map
+++ b/drivers/net/dpaa2/version.map
@@ -21,6 +21,7 @@ EXPERIMENTAL {
INTERNAL {
global:
+ dpaa2_dev_tx_multi_txq_ordered;
dpaa2_eth_eventq_attach;
dpaa2_eth_eventq_detach;
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* Re: [PATCH v2 06/16] net/dpaa2: support multiple txqs en-queue for ordered
2021-12-27 16:16 ` [PATCH v2 06/16] net/dpaa2: support multiple txqs en-queue for ordered nipun.gupta
@ 2021-12-27 18:01 ` Stephen Hemminger
2022-01-03 5:47 ` Nipun Gupta
0 siblings, 1 reply; 68+ messages in thread
From: Stephen Hemminger @ 2021-12-27 18:01 UTC (permalink / raw)
To: nipun.gupta; +Cc: dev, thomas, ferruh.yigit, hemant.agrawal, Jun Yang
On Mon, 27 Dec 2021 21:46:35 +0530
nipun.gupta@nxp.com wrote:
> @@ -1003,16 +1003,20 @@ dpaa2_eventdev_txa_enqueue(void *port,
> struct rte_event ev[],
> uint16_t nb_events)
> {
> - struct rte_mbuf *m = (struct rte_mbuf *)ev[0].mbuf;
> + void *txq[32];
> + struct rte_mbuf *m[32];
You are assuming nb_events <= 32.
Why not size the array based on nb_events.
> uint8_t qid, i;
>
> RTE_SET_USED(port);
>
> for (i = 0; i < nb_events; i++) {
> - qid = rte_event_eth_tx_adapter_txq_get(m);
> - rte_eth_tx_burst(m->port, qid, &m, 1);
> + m[i] = (struct rte_mbuf *)ev[i].mbuf;
Why the cast? it is already the right type.
> + qid = rte_event_eth_tx_adapter_txq_get(m[i]);
> + txq[i] = rte_eth_devices[m[i]->port].data->tx_queues[qid];
^ permalink raw reply [flat|nested] 68+ messages in thread
* RE: [PATCH v2 06/16] net/dpaa2: support multiple txqs en-queue for ordered
2021-12-27 18:01 ` Stephen Hemminger
@ 2022-01-03 5:47 ` Nipun Gupta
2022-01-03 8:39 ` Nipun Gupta
0 siblings, 1 reply; 68+ messages in thread
From: Nipun Gupta @ 2022-01-03 5:47 UTC (permalink / raw)
To: Stephen Hemminger; +Cc: dev, thomas, ferruh.yigit, Hemant Agrawal, Jun Yang
> -----Original Message-----
> From: Stephen Hemminger <stephen@networkplumber.org>
> Sent: 27 December 2021 23:32
> To: Nipun Gupta <nipun.gupta@nxp.com>
> Cc: dev@dpdk.org; thomas@monjalon.net; ferruh.yigit@intel.com; Hemant
> Agrawal <hemant.agrawal@nxp.com>; Jun Yang <jun.yang@nxp.com>
> Subject: Re: [PATCH v2 06/16] net/dpaa2: support multiple txqs en-queue for
> ordered
>
> On Mon, 27 Dec 2021 21:46:35 +0530
> nipun.gupta@nxp.com wrote:
>
> > @@ -1003,16 +1003,20 @@ dpaa2_eventdev_txa_enqueue(void *port,
> > struct rte_event ev[],
> > uint16_t nb_events)
> > {
> > - struct rte_mbuf *m = (struct rte_mbuf *)ev[0].mbuf;
> > + void *txq[32];
> > + struct rte_mbuf *m[32];
>
> You are assuming nb_events <= 32.
> Why not size the array based on nb_events.
Agree. Actually I will use DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH here.
>
> > uint8_t qid, i;
> >
> > RTE_SET_USED(port);
> >
> > for (i = 0; i < nb_events; i++) {
> > - qid = rte_event_eth_tx_adapter_txq_get(m);
> > - rte_eth_tx_burst(m->port, qid, &m, 1);
> > + m[i] = (struct rte_mbuf *)ev[i].mbuf;
>
> Why the cast? it is already the right type.
Will remove the cast.
Thanks,
Nipun
>
> > + qid = rte_event_eth_tx_adapter_txq_get(m[i]);
> > + txq[i] = rte_eth_devices[m[i]->port].data->tx_queues[qid];
^ permalink raw reply [flat|nested] 68+ messages in thread
* RE: [PATCH v2 06/16] net/dpaa2: support multiple txqs en-queue for ordered
2022-01-03 5:47 ` Nipun Gupta
@ 2022-01-03 8:39 ` Nipun Gupta
0 siblings, 0 replies; 68+ messages in thread
From: Nipun Gupta @ 2022-01-03 8:39 UTC (permalink / raw)
To: Stephen Hemminger; +Cc: dev, thomas, ferruh.yigit, Hemant Agrawal, Jun Yang
> -----Original Message-----
> From: Nipun Gupta
> Sent: 03 January 2022 11:17
> To: Stephen Hemminger <stephen@networkplumber.org>
> Cc: dev@dpdk.org; thomas@monjalon.net; ferruh.yigit@intel.com; Hemant
> Agrawal <hemant.agrawal@nxp.com>; Jun Yang <jun.yang@nxp.com>
> Subject: RE: [PATCH v2 06/16] net/dpaa2: support multiple txqs en-queue for
> ordered
>
>
>
> > -----Original Message-----
> > From: Stephen Hemminger <stephen@networkplumber.org>
> > Sent: 27 December 2021 23:32
> > To: Nipun Gupta <nipun.gupta@nxp.com>
> > Cc: dev@dpdk.org; thomas@monjalon.net; ferruh.yigit@intel.com; Hemant
> > Agrawal <hemant.agrawal@nxp.com>; Jun Yang <jun.yang@nxp.com>
> > Subject: Re: [PATCH v2 06/16] net/dpaa2: support multiple txqs en-queue for
> > ordered
> >
> > On Mon, 27 Dec 2021 21:46:35 +0530
> > nipun.gupta@nxp.com wrote:
> >
> > > @@ -1003,16 +1003,20 @@ dpaa2_eventdev_txa_enqueue(void *port,
> > > struct rte_event ev[],
> > > uint16_t nb_events)
> > > {
> > > - struct rte_mbuf *m = (struct rte_mbuf *)ev[0].mbuf;
> > > + void *txq[32];
> > > + struct rte_mbuf *m[32];
> >
> > You are assuming nb_events <= 32.
> > Why not size the array based on nb_events.
>
> Agree. Actually I will use DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH here.
>
> >
> > > uint8_t qid, i;
> > >
> > > RTE_SET_USED(port);
> > >
> > > for (i = 0; i < nb_events; i++) {
> > > - qid = rte_event_eth_tx_adapter_txq_get(m);
> > > - rte_eth_tx_burst(m->port, qid, &m, 1);
> > > + m[i] = (struct rte_mbuf *)ev[i].mbuf;
> >
> > Why the cast? it is already the right type.
>
> Will remove the cast.
mbuf is void *type in event structure, so it seems better to cast here.
>
> Thanks,
> Nipun
>
> >
> > > + qid = rte_event_eth_tx_adapter_txq_get(m[i]);
> > > + txq[i] = rte_eth_devices[m[i]->port].data->tx_queues[qid];
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 07/16] net/dpaa2: add support for level 2 in traffic management
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
` (5 preceding siblings ...)
2021-12-27 16:16 ` [PATCH v2 06/16] net/dpaa2: support multiple txqs en-queue for ordered nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 16:16 ` [PATCH v2 08/16] net/dpaa2: secondary process handling for dpni nipun.gupta
` (9 subsequent siblings)
16 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Gagandeep Singh
From: Gagandeep Singh <g.singh@nxp.com>
This patch adds support for level 2 for QoS shaping.
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
doc/guides/nics/dpaa2.rst | 2 +-
drivers/net/dpaa2/dpaa2_ethdev.c | 55 ++-
drivers/net/dpaa2/dpaa2_ethdev.h | 6 +-
drivers/net/dpaa2/dpaa2_tm.c | 563 ++++++++++++++++++++++------
drivers/net/dpaa2/dpaa2_tm.h | 17 +-
drivers/net/dpaa2/mc/dpni.c | 302 +++++++++------
drivers/net/dpaa2/mc/fsl_dpni.h | 119 +++---
drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 79 ++--
8 files changed, 791 insertions(+), 352 deletions(-)
diff --git a/doc/guides/nics/dpaa2.rst b/doc/guides/nics/dpaa2.rst
index 831bc56488..2d113f53df 100644
--- a/doc/guides/nics/dpaa2.rst
+++ b/doc/guides/nics/dpaa2.rst
@@ -588,7 +588,7 @@ Supported Features
The following capabilities are supported:
-- Level0 (root node) and Level1 are supported.
+- Level0 (root node), Level1 and Level2 are supported.
- 1 private shaper at root node (port level) is supported.
- 8 TX queues per port supported (1 channel per port)
- Both SP and WFQ scheduling mechanisms are supported on all 8 queues.
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 18ff07249f..b91e773605 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -852,6 +852,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
struct dpni_queue tx_conf_cfg;
struct dpni_queue tx_flow_cfg;
uint8_t options = 0, flow_id;
+ uint16_t channel_id;
struct dpni_queue_id qid;
uint32_t tc_id;
int ret;
@@ -877,20 +878,6 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
- tc_id = tx_queue_id;
- flow_id = 0;
-
- ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
- tc_id, flow_id, options, &tx_flow_cfg);
- if (ret) {
- DPAA2_PMD_ERR("Error in setting the tx flow: "
- "tc_id=%d, flow=%d err=%d",
- tc_id, flow_id, ret);
- return -1;
- }
-
- dpaa2_q->flow_id = flow_id;
-
if (tx_queue_id == 0) {
/*Set tx-conf and error configuration*/
if (priv->flags & DPAA2_TX_CONF_ENABLE)
@@ -907,10 +894,26 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
return -1;
}
}
+
+ tc_id = tx_queue_id % priv->num_tx_tc;
+ channel_id = (uint8_t)(tx_queue_id / priv->num_tx_tc) % priv->num_channels;
+ flow_id = 0;
+
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
+ ((channel_id << 8) | tc_id), flow_id, options, &tx_flow_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error in setting the tx flow: "
+ "tc_id=%d, flow=%d err=%d",
+ tc_id, flow_id, ret);
+ return -1;
+ }
+
+ dpaa2_q->flow_id = flow_id;
+
dpaa2_q->tc_index = tc_id;
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX, dpaa2_q->tc_index,
+ DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index),
dpaa2_q->flow_id, &tx_flow_cfg, &qid);
if (ret) {
DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
@@ -942,7 +945,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
priv->token,
DPNI_QUEUE_TX,
- tc_id,
+ ((channel_id << 8) | tc_id),
&cong_notif_cfg);
if (ret) {
DPAA2_PMD_ERR(
@@ -959,7 +962,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
options = options | DPNI_QUEUE_OPT_USER_CTX;
tx_conf_cfg.user_context = (size_t)(dpaa2_q);
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+ DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
if (ret) {
DPAA2_PMD_ERR("Error in setting the tx conf flow: "
@@ -970,7 +973,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
}
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+ DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
if (ret) {
DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
@@ -1152,7 +1155,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
struct dpni_queue cfg;
struct dpni_error_cfg err_cfg;
- uint16_t qdid;
struct dpni_queue_id qid;
struct dpaa2_queue *dpaa2_q;
int ret, i;
@@ -1162,7 +1164,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
intr_handle = dpaa2_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
-
ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
@@ -1173,14 +1174,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
/* Power up the phy. Needed to make the link go UP */
dpaa2_dev_set_link_up(dev);
- ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX, &qdid);
- if (ret) {
- DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
- return ret;
- }
- priv->qdid = qdid;
-
for (i = 0; i < data->nb_rx_queues; i++) {
dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
@@ -2619,9 +2612,12 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
}
priv->num_rx_tc = attr.num_rx_tcs;
+ priv->num_tx_tc = attr.num_tx_tcs;
priv->qos_entries = attr.qos_entries;
priv->fs_entries = attr.fs_entries;
priv->dist_queues = attr.num_queues;
+ priv->num_channels = attr.num_channels;
+ priv->channel_inuse = 0;
/* only if the custom CG is enabled */
if (attr.options & DPNI_OPT_CUSTOM_CG)
@@ -2635,8 +2631,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
for (i = 0; i < attr.num_rx_tcs; i++)
priv->nb_rx_queues += attr.num_queues;
- /* Using number of TX queues as number of TX TCs */
- priv->nb_tx_queues = attr.num_tx_tcs;
+ priv->nb_tx_queues = attr.num_tx_tcs * attr.num_channels;
DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
priv->num_rx_tc, priv->nb_rx_queues,
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index e001a7e49d..1fc2fc367e 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -25,6 +25,7 @@
#define MAX_RX_QUEUES 128
#define MAX_TX_QUEUES 16
#define MAX_DPNI 8
+#define DPAA2_MAX_CHANNELS 16
#define DPAA2_RX_DEFAULT_NBDESC 512
@@ -160,15 +161,17 @@ struct dpaa2_dev_priv {
void *rx_vq[MAX_RX_QUEUES];
void *tx_vq[MAX_TX_QUEUES];
struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
- void *tx_conf_vq[MAX_TX_QUEUES];
+ void *tx_conf_vq[MAX_TX_QUEUES * DPAA2_MAX_CHANNELS];
void *rx_err_vq;
uint8_t flags; /*dpaa2 config flags */
uint8_t max_mac_filters;
uint8_t max_vlan_filters;
uint8_t num_rx_tc;
+ uint8_t num_tx_tc;
uint16_t qos_entries;
uint16_t fs_entries;
uint8_t dist_queues;
+ uint8_t num_channels;
uint8_t en_ordered;
uint8_t en_loose_ordered;
uint8_t max_cgs;
@@ -190,6 +193,7 @@ struct dpaa2_dev_priv {
struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
+ uint8_t channel_inuse;
LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
LIST_HEAD(nodes, dpaa2_tm_node) nodes;
LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles;
diff --git a/drivers/net/dpaa2/dpaa2_tm.c b/drivers/net/dpaa2/dpaa2_tm.c
index f5faaedfb4..8fe5bfa013 100644
--- a/drivers/net/dpaa2/dpaa2_tm.c
+++ b/drivers/net/dpaa2/dpaa2_tm.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2020 NXP
+ * Copyright 2020-2021 NXP
*/
#include <rte_ethdev.h>
@@ -7,12 +7,16 @@
#include <rte_tm_driver.h>
#include "dpaa2_ethdev.h"
+#include "dpaa2_pmd_logs.h"
+#include <dpaa2_hw_dpio.h>
#define DPAA2_BURST_MAX (64 * 1024)
#define DPAA2_SHAPER_MIN_RATE 0
#define DPAA2_SHAPER_MAX_RATE 107374182400ull
#define DPAA2_WEIGHT_MAX 24701
+#define DPAA2_PKT_ADJUST_LEN_MIN 0
+#define DPAA2_PKT_ADJUST_LEN_MAX 0x7ff
int
dpaa2_tm_init(struct rte_eth_dev *dev)
@@ -66,6 +70,8 @@ dpaa2_capabilities_get(struct rte_eth_dev *dev,
struct rte_tm_capabilities *cap,
struct rte_tm_error *error)
{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
if (!cap)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED,
@@ -73,27 +79,31 @@ dpaa2_capabilities_get(struct rte_eth_dev *dev,
memset(cap, 0, sizeof(*cap));
- /* root node(port) + txqs number, assuming each TX
+ /* root node(port) + channels + txqs number, assuming each TX
* Queue is mapped to each TC
*/
- cap->n_nodes_max = 1 + dev->data->nb_tx_queues;
- cap->n_levels_max = 2; /* port level + txqs level */
+ cap->n_nodes_max = 1 + priv->num_channels + dev->data->nb_tx_queues;
+ cap->n_levels_max = MAX_LEVEL;
cap->non_leaf_nodes_identical = 1;
cap->leaf_nodes_identical = 1;
- cap->shaper_n_max = 1;
- cap->shaper_private_n_max = 1;
- cap->shaper_private_dual_rate_n_max = 1;
+ cap->shaper_n_max = 1 + priv->num_channels; /* LNI + channels */
+ cap->shaper_private_n_max = 1 + priv->num_channels;
+ cap->shaper_private_dual_rate_n_max = 1 + priv->num_channels;
cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
+ cap->shaper_pkt_length_adjust_min = DPAA2_PKT_ADJUST_LEN_MIN;
+ cap->shaper_pkt_length_adjust_max = DPAA2_PKT_ADJUST_LEN_MAX;
- cap->sched_n_children_max = dev->data->nb_tx_queues;
- cap->sched_sp_n_priorities_max = dev->data->nb_tx_queues;
- cap->sched_wfq_n_children_per_group_max = dev->data->nb_tx_queues;
- cap->sched_wfq_n_groups_max = 2;
- cap->sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
+ if (priv->num_channels > DPNI_MAX_TC)
+ cap->sched_n_children_max = priv->num_channels;
+ else
+ cap->sched_n_children_max = DPNI_MAX_TC;
- cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_STATS;
+ cap->sched_sp_n_priorities_max = DPNI_MAX_TC;
+ cap->sched_wfq_n_children_per_group_max = DPNI_MAX_TC;
+ cap->sched_wfq_n_groups_max = 2;
+ cap->sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
return 0;
@@ -105,6 +115,8 @@ dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
struct rte_tm_level_capabilities *cap,
struct rte_tm_error *error)
{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
if (!cap)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED,
@@ -112,12 +124,12 @@ dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
memset(cap, 0, sizeof(*cap));
- if (level_id > 1)
+ if (level_id > QUEUE_LEVEL)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_LEVEL_ID,
NULL, "Wrong level id\n");
- if (level_id == 0) { /* Root node */
+ if (level_id == LNI_LEVEL) { /* Root node (LNI) */
cap->n_nodes_max = 1;
cap->n_nodes_nonleaf_max = 1;
cap->non_leaf_nodes_identical = 1;
@@ -127,20 +139,39 @@ dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
cap->nonleaf.shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
cap->nonleaf.shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
- cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
+ cap->nonleaf.sched_n_children_max = priv->num_channels; /* no. of channels */
cap->nonleaf.sched_sp_n_priorities_max = 1;
- cap->nonleaf.sched_wfq_n_children_per_group_max =
- dev->data->nb_tx_queues;
- cap->nonleaf.sched_wfq_n_groups_max = 2;
- cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = 1;
cap->nonleaf.stats_mask = RTE_TM_STATS_N_PKTS |
RTE_TM_STATS_N_BYTES;
+ } else if (level_id == CHANNEL_LEVEL) { /* channels */
+ cap->n_nodes_max = priv->num_channels;
+ cap->n_nodes_nonleaf_max = priv->num_channels;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = 1;
+ cap->nonleaf.shaper_private_dual_rate_supported = 1;
+ cap->nonleaf.shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
+ cap->nonleaf.shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
+
+ /* no. of class queues per channel */
+ cap->nonleaf.sched_n_children_max = priv->num_tx_tc;
+ cap->nonleaf.sched_sp_n_priorities_max = priv->num_tx_tc;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = priv->num_tx_tc;
+ cap->nonleaf.sched_wfq_n_groups_max = 2;
+ cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
} else { /* leaf nodes */
- cap->n_nodes_max = dev->data->nb_tx_queues;
- cap->n_nodes_leaf_max = dev->data->nb_tx_queues;
+ /* queues per channels * channel */
+ cap->n_nodes_max = priv->num_tx_tc * priv->num_channels;
+ cap->n_nodes_leaf_max = priv->num_tx_tc * priv->num_channels;
cap->leaf_nodes_identical = 1;
- cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS;
+ cap->leaf.shaper_private_supported = 0;
+ cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
}
return 0;
@@ -167,18 +198,33 @@ dpaa2_node_capabilities_get(struct rte_eth_dev *dev, uint32_t node_id,
RTE_TM_ERROR_TYPE_NODE_ID,
NULL, "Node id does not exist\n");
- if (node->type == 0) {
+ if (node->level_id == LNI_LEVEL) {
cap->shaper_private_supported = 1;
+ cap->shaper_private_dual_rate_supported = 1;
+ cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
+ cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
- cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
+ cap->nonleaf.sched_n_children_max = priv->num_channels;
cap->nonleaf.sched_sp_n_priorities_max = 1;
- cap->nonleaf.sched_wfq_n_children_per_group_max =
- dev->data->nb_tx_queues;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
+ } else if (node->level_id == CHANNEL_LEVEL) {
+ cap->shaper_private_supported = 1;
+ cap->shaper_private_dual_rate_supported = 1;
+ cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
+ cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
+
+ cap->nonleaf.sched_n_children_max = priv->num_tx_tc;
+ cap->nonleaf.sched_sp_n_priorities_max = priv->num_tx_tc;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = priv->num_tx_tc;
cap->nonleaf.sched_wfq_n_groups_max = 2;
- cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
- cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+ cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
} else {
- cap->stats_mask = RTE_TM_STATS_N_PKTS;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
}
return 0;
@@ -202,7 +248,7 @@ dpaa2_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, int *is_leaf,
RTE_TM_ERROR_TYPE_NODE_ID,
NULL, "Node id does not exist\n");
- *is_leaf = node->type == 1/*NODE_QUEUE*/ ? 1 : 0;
+ *is_leaf = node->type == LEAF_NODE ? 1 : 0;
return 0;
}
@@ -257,6 +303,13 @@ dpaa2_shaper_profile_add(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
NULL, "Wrong shaper profile id\n");
+ if (params->pkt_length_adjust > DPAA2_PKT_ADJUST_LEN_MAX ||
+ params->pkt_length_adjust < DPAA2_PKT_ADJUST_LEN_MIN)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ "Not supported pkt adjust length\n");
+
profile = dpaa2_shaper_profile_from_id(priv, shaper_profile_id);
if (profile)
return -rte_tm_error_set(error, EEXIST,
@@ -318,7 +371,7 @@ dpaa2_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
RTE_TM_ERROR_TYPE_NODE_WEIGHT,
NULL, "Weight is out of range\n");
- if (level_id != 0 && level_id != 1)
+ if (level_id > QUEUE_LEVEL)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_LEVEL_ID,
NULL, "Wrong level id\n");
@@ -338,39 +391,38 @@ dpaa2_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
NULL, "Shared shaper is not supported\n");
- /* verify port (root node) settings */
+ /* verify non leaf nodes settings */
if (node_id >= dev->data->nb_tx_queues) {
if (params->nonleaf.wfq_weight_mode)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
NULL, "WFQ weight mode is not supported\n");
-
- if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
- RTE_TM_STATS_N_BYTES))
+ } else {
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
return -rte_tm_error_set(error, EINVAL,
- RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
- NULL,
- "Requested port stats are not supported\n");
-
- return 0;
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL, "Private shaper not supported on leaf\n");
}
- if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
- return -rte_tm_error_set(error, EINVAL,
- RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
- NULL, "Private shaper not supported on leaf\n");
-
- if (params->stats_mask & ~RTE_TM_STATS_N_PKTS)
- return -rte_tm_error_set(error, EINVAL,
- RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
- NULL,
- "Requested stats are not supported\n");
/* check leaf node */
- if (level_id == 1) {
+ if (level_id == QUEUE_LEVEL) {
if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP)
return -rte_tm_error_set(error, ENODEV,
RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
NULL, "Only taildrop is supported\n");
+ if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES))
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ "Requested port stats are not supported\n");
+ } else if (level_id == LNI_LEVEL) {
+ if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES))
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ "Requested port stats are not supported\n");
}
return 0;
@@ -407,7 +459,7 @@ dpaa2_node_add(struct rte_eth_dev *dev, uint32_t node_id,
}
if (parent_node_id == RTE_TM_NODE_ID_NULL) {
LIST_FOREACH(node, &priv->nodes, next) {
- if (node->type != 0 /*root node*/)
+ if (node->level_id != LNI_LEVEL)
continue;
return -rte_tm_error_set(error, EINVAL,
@@ -435,14 +487,29 @@ dpaa2_node_add(struct rte_eth_dev *dev, uint32_t node_id,
NULL, NULL);
node->id = node_id;
- node->type = parent_node_id == RTE_TM_NODE_ID_NULL ? 0/*NODE_PORT*/ :
- 1/*NODE_QUEUE*/;
+
+ if (node_id > dev->data->nb_tx_queues)
+ node->type = NON_LEAF_NODE;
+ else
+ node->type = LEAF_NODE;
+
+ node->level_id = level_id;
+ if (node->level_id == CHANNEL_LEVEL) {
+ if (priv->channel_inuse < priv->num_channels) {
+ node->channel_id = priv->channel_inuse;
+ priv->channel_inuse++;
+ } else {
+ printf("error no channel id available\n");
+ }
+ }
if (parent) {
node->parent = parent;
parent->refcnt++;
}
+ /* TODO: add check if refcnt is more than supported children */
+
if (profile) {
node->profile = profile;
profile->refcnt++;
@@ -464,6 +531,7 @@ dpaa2_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct dpaa2_tm_node *node;
+ /* XXX: update it */
if (0) {
return -rte_tm_error_set(error, EPERM,
RTE_TM_ERROR_TYPE_UNSPECIFIED,
@@ -493,119 +561,326 @@ dpaa2_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
return 0;
}
+static int
+dpaa2_tm_configure_queue(struct rte_eth_dev *dev, struct dpaa2_tm_node *node)
+{
+ int ret = 0;
+ uint32_t tc_id;
+ uint8_t flow_id, options = 0;
+ struct dpni_queue tx_flow_cfg;
+ struct dpni_queue_id qid;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_queue *dpaa2_q;
+
+ memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
+ dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id];
+ tc_id = node->parent->tc_id;
+ node->parent->tc_id++;
+ flow_id = 0;
+
+ if (dpaa2_q == NULL) {
+ printf("Queue is not configured for node = %d\n", node->id);
+ return -1;
+ }
+
+ DPAA2_PMD_DEBUG("tc_id = %d, channel = %d\n\n", tc_id,
+ node->parent->channel_id);
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
+ ((node->parent->channel_id << 8) | tc_id),
+ flow_id, options, &tx_flow_cfg);
+ if (ret) {
+ printf("Error in setting the tx flow: "
+ "channel id = %d tc_id= %d, param = 0x%x "
+ "flow=%d err=%d\n", node->parent->channel_id, tc_id,
+ ((node->parent->channel_id << 8) | tc_id), flow_id,
+ ret);
+ return -1;
+ }
+
+ dpaa2_q->flow_id = flow_id;
+ dpaa2_q->tc_index = tc_id;
+
+ ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX, ((node->parent->channel_id << 8) | dpaa2_q->tc_index),
+ dpaa2_q->flow_id, &tx_flow_cfg, &qid);
+ if (ret) {
+ printf("Error in getting LFQID err=%d", ret);
+ return -1;
+ }
+ dpaa2_q->fqid = qid.fqid;
+
+ /* setting congestion notification */
+ if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
+ struct dpni_congestion_notification_cfg cong_notif_cfg = {0};
+
+ cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
+ cong_notif_cfg.threshold_entry = dpaa2_q->nb_desc;
+ /* Notify that the queue is not congested when the data in
+ * the queue is below this threshold. (90% of value)
+ */
+ cong_notif_cfg.threshold_exit = (dpaa2_q->nb_desc * 9) / 10;
+ cong_notif_cfg.message_ctx = 0;
+ cong_notif_cfg.message_iova =
+ (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
+ cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
+ cong_notif_cfg.notification_mode =
+ DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
+ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
+ DPNI_CONG_OPT_COHERENT_WRITE;
+ cong_notif_cfg.cg_point = DPNI_CP_QUEUE;
+
+ ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
+ priv->token,
+ DPNI_QUEUE_TX,
+ ((node->parent->channel_id << 8) | tc_id),
+ &cong_notif_cfg);
+ if (ret) {
+ printf("Error in setting tx congestion notification: "
+ "err=%d", ret);
+ return -ret;
+ }
+ }
+
+ return 0;
+}
+
+static void
+dpaa2_tm_sort_and_configure(struct rte_eth_dev *dev,
+ struct dpaa2_tm_node **nodes, int n)
+{
+ struct dpaa2_tm_node *temp_node;
+ int i;
+
+ if (n == 1) {
+ DPAA2_PMD_DEBUG("node id = %d\n, priority = %d, index = %d\n",
+ nodes[n - 1]->id, nodes[n - 1]->priority,
+ n - 1);
+ dpaa2_tm_configure_queue(dev, nodes[n - 1]);
+ return;
+ }
+
+ for (i = 0; i < n - 1; i++) {
+ if (nodes[i]->priority > nodes[i + 1]->priority) {
+ temp_node = nodes[i];
+ nodes[i] = nodes[i + 1];
+ nodes[i + 1] = temp_node;
+ }
+ }
+ dpaa2_tm_sort_and_configure(dev, nodes, n - 1);
+
+ DPAA2_PMD_DEBUG("node id = %d\n, priority = %d, index = %d\n",
+ nodes[n - 1]->id, nodes[n - 1]->priority,
+ n - 1);
+ dpaa2_tm_configure_queue(dev, nodes[n - 1]);
+}
+
static int
dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
struct rte_tm_error *error)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct dpaa2_tm_node *node, *temp_node;
+ struct dpaa2_tm_node *node;
+ struct dpaa2_tm_node *leaf_node, *temp_leaf_node, *channel_node;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
- int ret;
- int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
- struct dpni_tx_priorities_cfg prio_cfg;
+ int ret, t;
+
+ /* Populate TCs */
+ LIST_FOREACH(channel_node, &priv->nodes, next) {
+ struct dpaa2_tm_node *nodes[DPNI_MAX_TC];
+ int i = 0;
- memset(&prio_cfg, 0, sizeof(prio_cfg));
- memset(conf, 0, sizeof(conf));
+ if (channel_node->level_id != CHANNEL_LEVEL)
+ continue;
+
+ LIST_FOREACH(leaf_node, &priv->nodes, next) {
+ if (leaf_node->level_id == LNI_LEVEL ||
+ leaf_node->level_id == CHANNEL_LEVEL)
+ continue;
+ if (leaf_node->parent == channel_node) {
+ if (i >= DPNI_MAX_TC) {
+ ret = -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "More children than supported\n");
+ goto out;
+ }
+ nodes[i++] = leaf_node;
+ }
+ }
+ if (i > 0) {
+ DPAA2_PMD_DEBUG("Configure queues\n");
+ dpaa2_tm_sort_and_configure(dev, nodes, i);
+ }
+ }
+
+ /* Shaping */
LIST_FOREACH(node, &priv->nodes, next) {
- if (node->type == 0/*root node*/) {
+ if (node->type == NON_LEAF_NODE) {
if (!node->profile)
continue;
-
struct dpni_tx_shaping_cfg tx_cr_shaper, tx_er_shaper;
+ uint32_t param = 0;
tx_cr_shaper.max_burst_size =
node->profile->params.committed.size;
tx_cr_shaper.rate_limit =
- node->profile->params.committed.rate / (1024 * 1024);
+ node->profile->params.committed.rate /
+ (1024 * 1024);
tx_er_shaper.max_burst_size =
node->profile->params.peak.size;
tx_er_shaper.rate_limit =
node->profile->params.peak.rate / (1024 * 1024);
+ /* root node */
+ if (node->parent == NULL) {
+ DPAA2_PMD_DEBUG("LNI S.rate = %u, burst =%u\n",
+ tx_cr_shaper.rate_limit,
+ tx_cr_shaper.max_burst_size);
+ param = 0x2;
+ param |= node->profile->params.pkt_length_adjust << 16;
+ } else {
+ DPAA2_PMD_DEBUG("Channel = %d S.rate = %u\n",
+ node->channel_id,
+ tx_cr_shaper.rate_limit);
+ param = (node->channel_id << 8);
+ }
ret = dpni_set_tx_shaping(dpni, 0, priv->token,
- &tx_cr_shaper, &tx_er_shaper, 0);
+ &tx_cr_shaper, &tx_er_shaper, param);
if (ret) {
ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_SHAPER_PROFILE, NULL,
"Error in setting Shaping\n");
goto out;
}
+ continue;
+ }
+ }
+ LIST_FOREACH(channel_node, &priv->nodes, next) {
+ int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
+ struct dpni_tx_priorities_cfg prio_cfg;
+
+ memset(&prio_cfg, 0, sizeof(prio_cfg));
+ memset(conf, 0, sizeof(conf));
+
+ /* Process for each channel */
+ if (channel_node->level_id != CHANNEL_LEVEL)
continue;
- } else { /* level 1, all leaf nodes */
- if (node->id >= dev->data->nb_tx_queues) {
+
+ LIST_FOREACH(leaf_node, &priv->nodes, next) {
+ struct dpaa2_queue *leaf_dpaa2_q;
+ uint8_t leaf_tc_id;
+
+ if (leaf_node->level_id == LNI_LEVEL ||
+ leaf_node->level_id == CHANNEL_LEVEL)
+ continue;
+
+ /* level 2, all leaf nodes */
+ if (leaf_node->id >= dev->data->nb_tx_queues) {
ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID, NULL,
"Not enough txqs configured\n");
goto out;
}
- if (conf[node->id])
+ if (conf[leaf_node->id])
+ continue;
+
+ if (leaf_node->parent != channel_node)
continue;
- LIST_FOREACH(temp_node, &priv->nodes, next) {
- if (temp_node->id == node->id ||
- temp_node->type == 0)
+ leaf_dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[leaf_node->id];
+ leaf_tc_id = leaf_dpaa2_q->tc_index;
+ /* Process sibling leaf nodes */
+ LIST_FOREACH(temp_leaf_node, &priv->nodes, next) {
+ if (temp_leaf_node->id == leaf_node->id ||
+ temp_leaf_node->level_id == LNI_LEVEL ||
+ temp_leaf_node->level_id == CHANNEL_LEVEL)
+ continue;
+
+ if (temp_leaf_node->parent != channel_node)
continue;
- if (conf[temp_node->id])
+
+ if (conf[temp_leaf_node->id])
continue;
- if (node->priority == temp_node->priority) {
+
+ if (leaf_node->priority == temp_leaf_node->priority) {
+ struct dpaa2_queue *temp_leaf_dpaa2_q;
+ uint8_t temp_leaf_tc_id;
+
+ temp_leaf_dpaa2_q = (struct dpaa2_queue *)
+ dev->data->tx_queues[temp_leaf_node->id];
+ temp_leaf_tc_id = temp_leaf_dpaa2_q->tc_index;
if (wfq_grp == 0) {
- prio_cfg.tc_sched[temp_node->id].mode =
- DPNI_TX_SCHED_WEIGHTED_A;
- /* DPDK support lowest weight 1
- * and DPAA2 platform 100
- */
- prio_cfg.tc_sched[temp_node->id].delta_bandwidth =
- temp_node->weight + 99;
+ prio_cfg.tc_sched[temp_leaf_tc_id].mode =
+ DPNI_TX_SCHED_WEIGHTED_A;
+ /* DPAA2 supports weight in multiples of 100 */
+ prio_cfg.tc_sched[temp_leaf_tc_id].delta_bandwidth =
+ temp_leaf_node->weight * 100;
} else if (wfq_grp == 1) {
- prio_cfg.tc_sched[temp_node->id].mode =
- DPNI_TX_SCHED_WEIGHTED_B;
- prio_cfg.tc_sched[temp_node->id].delta_bandwidth =
- temp_node->weight + 99;
+ prio_cfg.tc_sched[temp_leaf_tc_id].mode =
+ DPNI_TX_SCHED_WEIGHTED_B;
+ prio_cfg.tc_sched[temp_leaf_tc_id].delta_bandwidth =
+ temp_leaf_node->weight * 100;
} else {
- /*TODO: add one more check for
- * number of nodes in a group
- */
ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
"Only 2 WFQ Groups are supported\n");
goto out;
}
- conf[temp_node->id] = 1;
is_wfq_grp = 1;
+ conf[temp_leaf_node->id] = 1;
}
}
if (is_wfq_grp) {
if (wfq_grp == 0) {
- prio_cfg.tc_sched[node->id].mode =
- DPNI_TX_SCHED_WEIGHTED_A;
- prio_cfg.tc_sched[node->id].delta_bandwidth =
- node->weight + 99;
- prio_cfg.prio_group_A = node->priority;
+ prio_cfg.tc_sched[leaf_tc_id].mode =
+ DPNI_TX_SCHED_WEIGHTED_A;
+ prio_cfg.tc_sched[leaf_tc_id].delta_bandwidth =
+ leaf_node->weight * 100;
+ prio_cfg.prio_group_A = leaf_node->priority;
} else if (wfq_grp == 1) {
- prio_cfg.tc_sched[node->id].mode =
- DPNI_TX_SCHED_WEIGHTED_B;
- prio_cfg.tc_sched[node->id].delta_bandwidth =
- node->weight + 99;
- prio_cfg.prio_group_B = node->priority;
+ prio_cfg.tc_sched[leaf_tc_id].mode =
+ DPNI_TX_SCHED_WEIGHTED_B;
+ prio_cfg.tc_sched[leaf_tc_id].delta_bandwidth =
+ leaf_node->weight * 100;
+ prio_cfg.prio_group_B = leaf_node->priority;
}
wfq_grp++;
is_wfq_grp = 0;
}
- conf[node->id] = 1;
+ conf[leaf_node->id] = 1;
}
- if (wfq_grp)
+ if (wfq_grp > 1) {
prio_cfg.separate_groups = 1;
- }
- ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
- if (ret) {
- ret = -rte_tm_error_set(error, EINVAL,
+ if (prio_cfg.prio_group_B < prio_cfg.prio_group_A) {
+ prio_cfg.prio_group_A = 0;
+ prio_cfg.prio_group_B = 1;
+ } else {
+ prio_cfg.prio_group_A = 1;
+ prio_cfg.prio_group_B = 0;
+ }
+ }
+
+ prio_cfg.prio_group_A = 1;
+ prio_cfg.channel_idx = channel_node->channel_id;
+ ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
+ if (ret) {
+ ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
"Scheduling Failed\n");
- goto out;
+ goto out;
+ }
+ DPAA2_PMD_DEBUG("########################################\n");
+ DPAA2_PMD_DEBUG("Channel idx = %d\n", prio_cfg.channel_idx);
+ for (t = 0; t < DPNI_MAX_TC; t++) {
+ DPAA2_PMD_DEBUG("tc = %d mode = %d ", t, prio_cfg.tc_sched[t].mode);
+ DPAA2_PMD_DEBUG("delta = %d\n", prio_cfg.tc_sched[t].delta_bandwidth);
+ }
+ DPAA2_PMD_DEBUG("prioritya = %d\n", prio_cfg.prio_group_A);
+ DPAA2_PMD_DEBUG("priorityb = %d\n", prio_cfg.prio_group_B);
+ DPAA2_PMD_DEBUG("separate grps = %d\n\n", prio_cfg.separate_groups);
}
-
return 0;
out:
@@ -617,6 +892,81 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
return ret;
}
+static int
+dpaa2_node_stats_read(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_node_stats *stats, uint64_t *stats_mask,
+ int clear, struct rte_tm_error *error)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_tm_node *node;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ union dpni_statistics value;
+ int ret = 0;
+
+ node = dpaa2_node_from_id(priv, node_id);
+ if (!node)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id does not exist\n");
+
+ if (stats_mask)
+ *stats_mask = node->stats_mask;
+
+ if (!stats)
+ return 0;
+
+ memset(stats, 0, sizeof(*stats));
+ memset(&value, 0, sizeof(union dpni_statistics));
+
+ if (node->level_id == LNI_LEVEL) {
+ uint8_t page1 = 1;
+
+ ret = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ page1, 0, &value);
+ if (ret)
+ return -rte_tm_error_set(error, -ret,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to read port statistics\n");
+
+ if (node->stats_mask & RTE_TM_STATS_N_PKTS)
+ stats->n_pkts = value.page_1.egress_all_frames;
+
+ if (node->stats_mask & RTE_TM_STATS_N_BYTES)
+ stats->n_bytes = value.page_1.egress_all_bytes;
+
+ if (clear) {
+ ret = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
+ return -rte_tm_error_set(error, -ret,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to reset port statistics\n");
+ }
+ } else if (node->level_id == QUEUE_LEVEL) {
+ uint8_t page3 = 3;
+ struct dpaa2_queue *dpaa2_q;
+ dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id];
+
+ ret = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ page3,
+ (node->parent->channel_id << 8 |
+ dpaa2_q->tc_index), &value);
+ if (ret)
+ return -rte_tm_error_set(error, -ret,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to read queue statistics\n");
+
+ if (node->stats_mask & RTE_TM_STATS_N_PKTS)
+ stats->n_pkts = value.page_3.ceetm_dequeue_frames;
+ if (node->stats_mask & RTE_TM_STATS_N_BYTES)
+ stats->n_bytes = value.page_3.ceetm_dequeue_bytes;
+ } else {
+ return -rte_tm_error_set(error, -1,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to read channel statistics\n");
+ }
+
+ return 0;
+}
+
const struct rte_tm_ops dpaa2_tm_ops = {
.node_type_get = dpaa2_node_type_get,
.capabilities_get = dpaa2_capabilities_get,
@@ -627,4 +977,5 @@ const struct rte_tm_ops dpaa2_tm_ops = {
.node_add = dpaa2_node_add,
.node_delete = dpaa2_node_delete,
.hierarchy_commit = dpaa2_hierarchy_commit,
+ .node_stats_read = dpaa2_node_stats_read,
};
diff --git a/drivers/net/dpaa2/dpaa2_tm.h b/drivers/net/dpaa2/dpaa2_tm.h
index 6632fab687..cfbb437322 100644
--- a/drivers/net/dpaa2/dpaa2_tm.h
+++ b/drivers/net/dpaa2/dpaa2_tm.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2020 NXP
+ * Copyright 2020-2021 NXP
*/
#ifndef _DPAA2_TM_H_
@@ -7,6 +7,18 @@
#include <rte_tm.h>
+enum node_type {
+ NON_LEAF_NODE = 0,
+ LEAF_NODE
+};
+
+enum level_type {
+ LNI_LEVEL = 0,
+ CHANNEL_LEVEL,
+ QUEUE_LEVEL,
+ MAX_LEVEL
+};
+
struct dpaa2_tm_shaper_profile {
LIST_ENTRY(dpaa2_tm_shaper_profile) next;
uint32_t id;
@@ -18,6 +30,9 @@ struct dpaa2_tm_node {
LIST_ENTRY(dpaa2_tm_node) next;
uint32_t id;
uint32_t type;
+ uint32_t level_id;
+ uint16_t channel_id; /* Only for level 1 nodes */
+ uint16_t tc_id; /* Only for level 1 nodes */
int refcnt;
struct dpaa2_tm_node *parent;
struct dpaa2_tm_shaper_profile *profile;
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index cf78295d90..b7a65cb637 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -916,6 +916,44 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_link_cfg() - return the link configuration configured by
+ * dpni_set_link_cfg().
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Link configuration from dpni object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_link_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_link_cfg *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_CFG,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
+ cfg->advertising = le64_to_cpu(rsp_params->advertising);
+ cfg->options = le64_to_cpu(rsp_params->options);
+ cfg->rate = le32_to_cpu(rsp_params->rate);
+
+ return err;
+}
+
/**
* dpni_get_link_state() - Return the link state (either up or down)
* @mc_io: Pointer to MC portal's I/O object
@@ -1678,6 +1716,38 @@ int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_tx_confirmation_mode() - Get Tx confirmation mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mode: Tx confirmation mode
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_confirmation_mode *mode)
+{
+ struct dpni_tx_confirmation_mode *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONFIRMATION_MODE,
+ cmd_flags,
+ token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpni_tx_confirmation_mode *)cmd.params;
+ *mode = rsp_params->confirmation_mode;
+
+ return 0;
+}
+
/**
* dpni_set_qos_table() - Set QoS mapping table
* @mc_io: Pointer to MC portal's I/O object
@@ -2733,6 +2803,122 @@ int dpni_get_opr(struct fsl_mc_io *mc_io,
return 0;
}
+int dpni_load_sw_sequence(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_load_ss_cfg *cfg)
+{
+ struct dpni_load_sw_sequence *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_LOAD_SW_SEQUENCE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_load_sw_sequence *)cmd.params;
+ cmd_params->dest = cfg->dest;
+ cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
+ cmd_params->ss_size = cpu_to_le16(cfg->ss_size);
+ cmd_params->ss_iova = cpu_to_le64(cfg->ss_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_enable_sw_sequence(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_enable_ss_cfg *cfg)
+{
+ struct dpni_enable_sw_sequence *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_SW_SEQUENCE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_enable_sw_sequence *)cmd.params;
+ cmd_params->dest = cfg->dest;
+ cmd_params->set_start = cfg->set_start;
+ cmd_params->hxs = cpu_to_le16(cfg->hxs);
+ cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
+ cmd_params->param_offset = cfg->param_offset;
+ cmd_params->param_size = cfg->param_size;
+ cmd_params->param_iova = cpu_to_le64(cfg->param_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_sw_sequence_layout() - Get the soft sequence layout
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @src: Source of the layout (WRIOP Rx or Tx)
+ * @ss_layout_iova: I/O virtual address of 264 bytes DMA-able memory
+ *
+ * warning: After calling this function, call dpni_extract_sw_sequence_layout()
+ * to get the layout.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_get_sw_sequence_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_soft_sequence_dest src,
+ uint64_t ss_layout_iova)
+{
+ struct dpni_get_sw_sequence_layout *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT,
+ cmd_flags,
+ token);
+
+ cmd_params = (struct dpni_get_sw_sequence_layout *)cmd.params;
+ cmd_params->src = src;
+ cmd_params->layout_iova = cpu_to_le64(ss_layout_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_extract_sw_sequence_layout() - extract the software sequence layout
+ * @layout: software sequence layout
+ * @sw_sequence_layout_buf: Zeroed 264 bytes of memory before mapping it
+ * to DMA
+ *
+ * This function has to be called after dpni_get_sw_sequence_layout
+ *
+ */
+void dpni_extract_sw_sequence_layout(struct dpni_sw_sequence_layout *layout,
+ const uint8_t *sw_sequence_layout_buf)
+{
+ const struct dpni_sw_sequence_layout_entry *ext_params;
+ int i;
+ uint16_t ss_size, ss_offset;
+
+ ext_params = (const struct dpni_sw_sequence_layout_entry *)
+ sw_sequence_layout_buf;
+
+ for (i = 0; i < DPNI_SW_SEQUENCE_LAYOUT_SIZE; i++) {
+ ss_offset = le16_to_cpu(ext_params[i].ss_offset);
+ ss_size = le16_to_cpu(ext_params[i].ss_size);
+
+ if (ss_offset == 0 && ss_size == 0) {
+ layout->num_ss = i;
+ return;
+ }
+
+ layout->ss[i].ss_offset = ss_offset;
+ layout->ss[i].ss_size = ss_size;
+ layout->ss[i].param_offset = ext_params[i].param_offset;
+ layout->ss[i].param_size = ext_params[i].param_size;
+ }
+}
/**
* dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
* @mc_io: Pointer to MC portal's I/O object
@@ -2901,119 +3087,3 @@ int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
return err;
}
-int dpni_load_sw_sequence(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpni_load_ss_cfg *cfg)
-{
- struct dpni_load_sw_sequence *cmd_params;
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPNI_CMDID_LOAD_SW_SEQUENCE,
- cmd_flags,
- token);
- cmd_params = (struct dpni_load_sw_sequence *)cmd.params;
- cmd_params->dest = cfg->dest;
- cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
- cmd_params->ss_size = cpu_to_le16(cfg->ss_size);
- cmd_params->ss_iova = cpu_to_le64(cfg->ss_iova);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-int dpni_enable_sw_sequence(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpni_enable_ss_cfg *cfg)
-{
- struct dpni_enable_sw_sequence *cmd_params;
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_SW_SEQUENCE,
- cmd_flags,
- token);
- cmd_params = (struct dpni_enable_sw_sequence *)cmd.params;
- cmd_params->dest = cfg->dest;
- cmd_params->set_start = cfg->set_start;
- cmd_params->hxs = cpu_to_le16(cfg->hxs);
- cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
- cmd_params->param_offset = cfg->param_offset;
- cmd_params->param_size = cfg->param_size;
- cmd_params->param_iova = cpu_to_le64(cfg->param_iova);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpni_get_sw_sequence_layout() - Get the soft sequence layout
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @src: Source of the layout (WRIOP Rx or Tx)
- * @ss_layout_iova: I/O virtual address of 264 bytes DMA-able memory
- *
- * warning: After calling this function, call dpni_extract_sw_sequence_layout()
- * to get the layout.
- *
- * Return: '0' on Success; error code otherwise.
- */
-int dpni_get_sw_sequence_layout(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- enum dpni_soft_sequence_dest src,
- uint64_t ss_layout_iova)
-{
- struct dpni_get_sw_sequence_layout *cmd_params;
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT,
- cmd_flags,
- token);
-
- cmd_params = (struct dpni_get_sw_sequence_layout *)cmd.params;
- cmd_params->src = src;
- cmd_params->layout_iova = cpu_to_le64(ss_layout_iova);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpni_extract_sw_sequence_layout() - extract the software sequence layout
- * @layout: software sequence layout
- * @sw_sequence_layout_buf: Zeroed 264 bytes of memory before mapping it
- * to DMA
- *
- * This function has to be called after dpni_get_sw_sequence_layout
- *
- */
-void dpni_extract_sw_sequence_layout(struct dpni_sw_sequence_layout *layout,
- const uint8_t *sw_sequence_layout_buf)
-{
- const struct dpni_sw_sequence_layout_entry *ext_params;
- int i;
- uint16_t ss_size, ss_offset;
-
- ext_params = (const struct dpni_sw_sequence_layout_entry *)
- sw_sequence_layout_buf;
-
- for (i = 0; i < DPNI_SW_SEQUENCE_LAYOUT_SIZE; i++) {
- ss_offset = le16_to_cpu(ext_params[i].ss_offset);
- ss_size = le16_to_cpu(ext_params[i].ss_size);
-
- if (ss_offset == 0 && ss_size == 0) {
- layout->num_ss = i;
- return;
- }
-
- layout->ss[i].ss_offset = ss_offset;
- layout->ss[i].ss_size = ss_size;
- layout->ss[i].param_offset = ext_params[i].param_offset;
- layout->ss[i].param_size = ext_params[i].param_size;
- }
-}
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
index 8aead28261..c7df727fef 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -761,6 +761,11 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
uint16_t token,
const struct dpni_link_cfg *cfg);
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_link_cfg *cfg);
+
/**
* struct dpni_link_state - Structure representing DPNI link state
* @rate: Rate
@@ -1709,63 +1714,6 @@ int dpni_get_opr(struct fsl_mc_io *mc_io,
uint8_t flags,
uint8_t opr_id);
-/**
- * When used for queue_idx in function dpni_set_rx_dist_default_queue will
- * signal to dpni to drop all unclassified frames
- */
-#define DPNI_FS_MISS_DROP ((uint16_t)-1)
-
-/**
- * struct dpni_rx_dist_cfg - distribution configuration
- * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
- * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
- * 512,768,896,1024
- * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
- * the extractions to be used for the distribution key by calling
- * dpkg_prepare_key_cfg() relevant only when enable!=0 otherwise
- * it can be '0'
- * @enable: enable/disable the distribution.
- * @tc: TC id for which distribution is set
- * @fs_miss_flow_id: when packet misses all rules from flow steering table and
- * hash is disabled it will be put into this queue id; use
- * DPNI_FS_MISS_DROP to drop frames. The value of this field is
- * used only when flow steering distribution is enabled and hash
- * distribution is disabled
- */
-struct dpni_rx_dist_cfg {
- uint16_t dist_size;
- uint64_t key_cfg_iova;
- uint8_t enable;
- uint8_t tc;
- uint16_t fs_miss_flow_id;
-};
-
-int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
- uint16_t token, const struct dpni_rx_dist_cfg *cfg);
-
-int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
- uint16_t token, const struct dpni_rx_dist_cfg *cfg);
-
-int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
- uint16_t token, uint16_t tpid);
-
-int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
- uint16_t token, uint16_t tpid);
-
-/**
- * struct dpni_custom_tpid_cfg - custom TPID configuration. Contains custom TPID
- * values used in current dpni object to detect 802.1q frames.
- * @tpid1: first tag. Not used if zero.
- * @tpid2: second tag. Not used if zero.
- */
-struct dpni_custom_tpid_cfg {
- uint16_t tpid1;
- uint16_t tpid2;
-};
-
-int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
- uint16_t token, struct dpni_custom_tpid_cfg *tpid);
-
/**
* enum dpni_soft_sequence_dest - Enumeration of WRIOP software sequence
* destinations
@@ -1936,4 +1884,61 @@ int dpni_set_port_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
int dpni_get_port_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
uint16_t token, struct dpni_port_cfg *port_cfg);
+/**
+ * When used for queue_idx in function dpni_set_rx_dist_default_queue will
+ * signal to dpni to drop all unclassified frames
+ */
+#define DPNI_FS_MISS_DROP ((uint16_t)-1)
+
+/**
+ * struct dpni_rx_dist_cfg - distribution configuration
+ * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
+ * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
+ * 512,768,896,1024
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * the extractions to be used for the distribution key by calling
+ * dpkg_prepare_key_cfg() relevant only when enable!=0 otherwise
+ * it can be '0'
+ * @enable: enable/disable the distribution.
+ * @tc: TC id for which distribution is set
+ * @fs_miss_flow_id: when packet misses all rules from flow steering table and
+ * hash is disabled it will be put into this queue id; use
+ * DPNI_FS_MISS_DROP to drop frames. The value of this field is
+ * used only when flow steering distribution is enabled and hash
+ * distribution is disabled
+ */
+struct dpni_rx_dist_cfg {
+ uint16_t dist_size;
+ uint64_t key_cfg_iova;
+ uint8_t enable;
+ uint8_t tc;
+ uint16_t fs_miss_flow_id;
+};
+
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, uint16_t tpid);
+
+int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, uint16_t tpid);
+
+/**
+ * struct dpni_custom_tpid_cfg - custom TPID configuration. Contains custom TPID
+ * values used in current dpni object to detect 802.1q frames.
+ * @tpid1: first tag. Not used if zero.
+ * @tpid2: second tag. Not used if zero.
+ */
+struct dpni_custom_tpid_cfg {
+ uint16_t tpid1;
+ uint16_t tpid2;
+};
+
+int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, struct dpni_custom_tpid_cfg *tpid);
+
#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index 8bff2ec9af..ed0bd7615a 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -108,16 +108,17 @@
#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE DPNI_CMD(0x26D)
+#define DPNI_CMDID_SET_OPR DPNI_CMD(0x26e)
+#define DPNI_CMDID_GET_OPR DPNI_CMD(0x26f)
#define DPNI_CMDID_LOAD_SW_SEQUENCE DPNI_CMD(0x270)
#define DPNI_CMDID_ENABLE_SW_SEQUENCE DPNI_CMD(0x271)
#define DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT DPNI_CMD(0x272)
-#define DPNI_CMDID_SET_OPR DPNI_CMD(0x26e)
-#define DPNI_CMDID_GET_OPR DPNI_CMD(0x26f)
#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
#define DPNI_CMDID_ADD_CUSTOM_TPID DPNI_CMD(0x275)
#define DPNI_CMDID_REMOVE_CUSTOM_TPID DPNI_CMD(0x276)
#define DPNI_CMDID_GET_CUSTOM_TPID DPNI_CMD(0x277)
+#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
@@ -451,8 +452,6 @@ struct dpni_cmd_enable_vlan_filter {
uint8_t en;
};
-#define DPNI_VLAN_SET_QUEUE_ACTION 1
-
struct dpni_cmd_vlan_id {
uint8_t flags;
uint8_t tc_id;
@@ -854,42 +853,6 @@ struct dpni_rsp_get_opr {
uint16_t opr_id;
};
-struct dpni_cmd_add_custom_tpid {
- uint16_t pad;
- uint16_t tpid;
-};
-
-struct dpni_cmd_remove_custom_tpid {
- uint16_t pad;
- uint16_t tpid;
-};
-
-struct dpni_rsp_get_custom_tpid {
- uint16_t tpid1;
- uint16_t tpid2;
-};
-
-#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
-#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
-struct dpni_cmd_set_rx_fs_dist {
- uint16_t dist_size;
- uint8_t enable;
- uint8_t tc;
- uint16_t miss_flow_id;
- uint16_t pad1;
- uint64_t key_cfg_iova;
-};
-
-#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
-#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
-struct dpni_cmd_set_rx_hash_dist {
- uint16_t dist_size;
- uint8_t enable;
- uint8_t tc_id;
- uint32_t pad;
- uint64_t key_cfg_iova;
-};
-
struct dpni_load_sw_sequence {
uint8_t dest;
uint8_t pad0[7];
@@ -957,5 +920,41 @@ struct dpni_rsp_get_port_cfg {
uint32_t bit_params;
};
+#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_fs_dist {
+ uint16_t dist_size;
+ uint8_t enable;
+ uint8_t tc;
+ uint16_t miss_flow_id;
+ uint16_t pad1;
+ uint64_t key_cfg_iova;
+};
+
+#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_hash_dist {
+ uint16_t dist_size;
+ uint8_t enable;
+ uint8_t tc_id;
+ uint32_t pad;
+ uint64_t key_cfg_iova;
+};
+
+struct dpni_cmd_add_custom_tpid {
+ uint16_t pad;
+ uint16_t tpid;
+};
+
+struct dpni_cmd_remove_custom_tpid {
+ uint16_t pad;
+ uint16_t tpid;
+};
+
+struct dpni_rsp_get_custom_tpid {
+ uint16_t tpid1;
+ uint16_t tpid2;
+};
+
#pragma pack(pop)
#endif /* _FSL_DPNI_CMD_H */
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 08/16] net/dpaa2: secondary process handling for dpni
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
` (6 preceding siblings ...)
2021-12-27 16:16 ` [PATCH v2 07/16] net/dpaa2: add support for level 2 in traffic management nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 16:16 ` [PATCH v2 09/16] bus/fslmc: add and scan dprc devices nipun.gupta
` (8 subsequent siblings)
16 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Jun Yang
From: Jun Yang <jun.yang@nxp.com>
This change uses 'dev->process_private' instead of 'priv->hw'
to get dpmcp per process while setting flow distribution,
as priv->hw is only valid for primary process.
It also initializes rte_dpaa2_bpid_info in the secondary process.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 23 +++++++++++++++++++++++
drivers/mempool/dpaa2/rte_dpaa2_mempool.h | 15 +++++++++++++++
drivers/mempool/dpaa2/version.map | 1 +
drivers/net/dpaa2/base/dpaa2_hw_dpni.c | 5 ++---
drivers/net/dpaa2/dpaa2_ethdev.c | 10 ++++++++--
drivers/net/dpaa2/dpaa2_ethdev.h | 3 ++-
6 files changed, 51 insertions(+), 6 deletions(-)
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 39c6252a63..56c629c681 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -263,6 +263,29 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
}
}
+int rte_dpaa2_bpid_info_init(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_info *bp_info = mempool_to_bpinfo(mp);
+ uint32_t bpid = bp_info->bpid;
+
+ if (!rte_dpaa2_bpid_info) {
+ rte_dpaa2_bpid_info = (struct dpaa2_bp_info *)rte_malloc(NULL,
+ sizeof(struct dpaa2_bp_info) * MAX_BPID,
+ RTE_CACHE_LINE_SIZE);
+ if (rte_dpaa2_bpid_info == NULL)
+ return -ENOMEM;
+ memset(rte_dpaa2_bpid_info, 0,
+ sizeof(struct dpaa2_bp_info) * MAX_BPID);
+ }
+
+ rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
+ + rte_pktmbuf_priv_size(mp);
+ rte_dpaa2_bpid_info[bpid].bp_list = bp_info->bp_list;
+ rte_dpaa2_bpid_info[bpid].bpid = bpid;
+
+ return 0;
+}
+
uint16_t
rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
{
diff --git a/drivers/mempool/dpaa2/rte_dpaa2_mempool.h b/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
index 4a22b7c42e..28dea74326 100644
--- a/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
+++ b/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
@@ -46,6 +46,21 @@ rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp);
struct rte_mbuf *
rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr);
+/**
+ * Initialize the rte_dpaa2_bpid_info
+ * In general, it is called in the secondary process and
+ * mp has been created in the primary process.
+ *
+ * @param mp
+ * memory pool
+ *
+ * @return
+ * - 0 on success.
+ * - (<0) on failure.
+ */
+__rte_internal
+int rte_dpaa2_bpid_info_init(struct rte_mempool *mp);
+
#ifdef __cplusplus
}
#endif
diff --git a/drivers/mempool/dpaa2/version.map b/drivers/mempool/dpaa2/version.map
index 49c460ec54..cfd4ae617a 100644
--- a/drivers/mempool/dpaa2/version.map
+++ b/drivers/mempool/dpaa2/version.map
@@ -11,5 +11,6 @@ INTERNAL {
global:
rte_dpaa2_bpid_info;
+ rte_dpaa2_bpid_info_init;
rte_dpaa2_mbuf_alloc_bulk;
};
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index 3170694841..9509f6e8a3 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -95,7 +95,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
uint64_t req_dist_set, int tc_index)
{
struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
- struct fsl_mc_io *dpni = priv->hw;
+ struct fsl_mc_io *dpni = eth_dev->process_private;
struct dpni_rx_dist_cfg tc_cfg;
struct dpkg_profile_cfg kg_cfg;
void *p_params;
@@ -457,13 +457,12 @@ dpaa2_distset_to_dpkg_profile_cfg(
int
dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
- void *blist)
+ struct fsl_mc_io *dpni, void *blist)
{
/* Function to attach a DPNI with a buffer pool list. Buffer pool list
* handle is passed in blist.
*/
int32_t retcode;
- struct fsl_mc_io *dpni = priv->hw;
struct dpni_pools_cfg bpool_cfg;
struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
struct dpni_buffer_layout layout;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index b91e773605..a45beed75f 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -18,6 +18,7 @@
#include <rte_dev.h>
#include <rte_fslmc.h>
#include <rte_flow_driver.h>
+#include "rte_dpaa2_mempool.h"
#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
@@ -712,9 +713,14 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ ret = rte_dpaa2_bpid_info_init(mb_pool);
+ if (ret)
+ return ret;
+ }
bpid = mempool_to_bpid(mb_pool);
- ret = dpaa2_attach_bp_list(priv,
- rte_dpaa2_bpid_info[bpid].bp_list);
+ ret = dpaa2_attach_bp_list(priv, dpni,
+ rte_dpaa2_bpid_info[bpid].bp_list);
if (ret)
return ret;
}
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 1fc2fc367e..bd33a22a8e 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -208,7 +208,8 @@ int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
uint8_t tc_index);
-int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);
+int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
+ struct fsl_mc_io *dpni, void *blist);
__rte_internal
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 09/16] bus/fslmc: add and scan dprc devices
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
` (7 preceding siblings ...)
2021-12-27 16:16 ` [PATCH v2 08/16] net/dpaa2: secondary process handling for dpni nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 16:16 ` [PATCH v2 10/16] net/dpaa2: support recycle loopback port nipun.gupta
` (7 subsequent siblings)
16 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Jun Yang, Nipun Gupta
From: Jun Yang <jun.yang@nxp.com>
In order to get the connection endpoint of each object,
scan the dprc object.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/bus/fslmc/fslmc_bus.c | 15 ++-
drivers/bus/fslmc/fslmc_vfio.c | 18 +++-
drivers/bus/fslmc/mc/dprc.c | 129 +++++++++++++++++++++++
drivers/bus/fslmc/mc/fsl_dprc.h | 46 ++++++++
drivers/bus/fslmc/mc/fsl_dprc_cmd.h | 48 +++++++++
drivers/bus/fslmc/meson.build | 4 +-
drivers/bus/fslmc/portal/dpaa2_hw_dprc.c | 100 ++++++++++++++++++
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 12 +++
drivers/bus/fslmc/rte_fslmc.h | 10 +-
9 files changed, 374 insertions(+), 8 deletions(-)
create mode 100644 drivers/bus/fslmc/mc/dprc.c
create mode 100644 drivers/bus/fslmc/mc/fsl_dprc.h
create mode 100644 drivers/bus/fslmc/mc/fsl_dprc_cmd.h
create mode 100644 drivers/bus/fslmc/portal/dpaa2_hw_dprc.c
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index a0ef24cdc8..a3c0d838c4 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2016,2018-2019 NXP
+ * Copyright 2016,2018-2021 NXP
*
*/
@@ -136,10 +136,6 @@ scan_one_fslmc_device(char *dev_name)
if (!dev_name)
return ret;
- /* Ignore the Container name itself */
- if (!strncmp("dprc", dev_name, 4))
- return 0;
-
/* Creating a temporary copy to perform cut-parse over string */
dup_dev_name = strdup(dev_name);
if (!dup_dev_name) {
@@ -197,6 +193,8 @@ scan_one_fslmc_device(char *dev_name)
dev->dev_type = DPAA2_MUX;
else if (!strncmp("dprtc", t_ptr, 5))
dev->dev_type = DPAA2_DPRTC;
+ else if (!strncmp("dprc", t_ptr, 4))
+ dev->dev_type = DPAA2_DPRC;
else
dev->dev_type = DPAA2_UNKNOWN;
@@ -339,6 +337,13 @@ rte_fslmc_scan(void)
goto scan_fail;
}
+ /* Scan the DPRC container object */
+ ret = scan_one_fslmc_device(fslmc_container);
+ if (ret != 0) {
+ /* Error in parsing directory - exit gracefully */
+ goto scan_fail_cleanup;
+ }
+
while ((entry = readdir(dir)) != NULL) {
if (entry->d_name[0] == '.' || entry->d_type != DT_DIR)
continue;
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index b4704eeae4..1b89a56bbc 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2021 NXP
*
*/
@@ -728,6 +728,7 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev)
case DPAA2_BPOOL:
case DPAA2_DPRTC:
case DPAA2_MUX:
+ case DPAA2_DPRC:
TAILQ_FOREACH(object, &dpaa2_obj_list, next) {
if (dev->dev_type == object->dev_type)
object->create(dev_fd, &device_info,
@@ -881,6 +882,21 @@ fslmc_vfio_process_group(void)
return -1;
}
+ /* Search for DPRC device next as it updates endpoint of
+ * other devices.
+ */
+ current_device = 0;
+ RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
+ if (dev->dev_type == DPAA2_DPRC) {
+ ret = fslmc_process_iodevices(dev);
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to process dprc");
+ return -1;
+ }
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ }
+ }
+
current_device = 0;
RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next,
dev_temp) {
diff --git a/drivers/bus/fslmc/mc/dprc.c b/drivers/bus/fslmc/mc/dprc.c
new file mode 100644
index 0000000000..491081c7c8
--- /dev/null
+++ b/drivers/bus/fslmc/mc/dprc.c
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2021 NXP
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dprc.h>
+#include <fsl_dprc_cmd.h>
+
+/** @addtogroup dprc
+ * @{
+ */
+
+/**
+ * dprc_open() - Open DPRC object for use
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @container_id: Container ID to open
+ * @token: Returned token of DPRC object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Required before any operation on the object.
+ */
+int dprc_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int container_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
+ 0);
+ cmd_params = (struct dprc_cmd_open *)cmd.params;
+ cmd_params->container_id = cpu_to_le32(container_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dprc_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_get_connection() - Get connected endpoint and link status if connection
+ * exists.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @endpoint1: Endpoint 1 configuration parameters
+ * @endpoint2: Returned endpoint 2 configuration parameters
+ * @state: Returned link state:
+ * 1 - link is up;
+ * 0 - link is down;
+ * -1 - no connection (endpoint2 information is irrelevant)
+ *
+ * Return: '0' on Success; -ENAVAIL if connection does not exist.
+ */
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_connection *cmd_params;
+ struct dprc_rsp_get_connection *rsp_params;
+ int err, i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+ cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+ cmd_params->ep1_interface_id = cpu_to_le16(endpoint1->if_id);
+ for (i = 0; i < 16; i++)
+ cmd_params->ep1_type[i] = endpoint1->type[i];
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+ endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+ endpoint2->if_id = le16_to_cpu(rsp_params->ep2_interface_id);
+ *state = le32_to_cpu(rsp_params->state);
+ for (i = 0; i < 16; i++)
+ endpoint2->type[i] = rsp_params->ep2_type[i];
+
+ return 0;
+}
diff --git a/drivers/bus/fslmc/mc/fsl_dprc.h b/drivers/bus/fslmc/mc/fsl_dprc.h
new file mode 100644
index 0000000000..177210c2d4
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dprc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2021 NXP
+ *
+ */
+#ifndef _FSL_DPRC_H
+#define _FSL_DPRC_H
+
+/** @addtogroup dprc Data Path Resource Container API
+ * Contains DPRC API for managing and querying DPAA resources
+ * @{
+ */
+
+struct fsl_mc_io;
+
+int dprc_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int container_id,
+ uint16_t *token);
+
+int dprc_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dprc_endpoint - Endpoint description for link connect/disconnect
+ * operations
+ * @type: Endpoint object type: NULL terminated string
+ * @id: Endpoint object ID
+ * @if_id: Interface ID; should be set for endpoints with multiple
+ * interfaces ("dpsw", "dpdmux"); for others, always set to 0
+ */
+struct dprc_endpoint {
+ char type[16];
+ int id;
+ uint16_t if_id;
+};
+
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state);
+#endif /* _FSL_DPRC_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dprc_cmd.h b/drivers/bus/fslmc/mc/fsl_dprc_cmd.h
new file mode 100644
index 0000000000..6efa5634d2
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dprc_cmd.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2021 NXP
+ *
+ */
+
+#ifndef _FSL_DPRC_CMD_H
+#define _FSL_DPRC_CMD_H
+
+/* Minimal supported DPRC Version */
+#define DPRC_VER_MAJOR 6
+#define DPRC_VER_MINOR 6
+
+/* Command versioning */
+#define DPRC_CMD_BASE_VERSION 1
+#define DPRC_CMD_ID_OFFSET 4
+
+#define DPRC_CMD(id) ((id << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
+#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
+#define DPRC_CMDID_GET_CONNECTION DPRC_CMD(0x16C)
+
+#pragma pack(push, 1)
+struct dprc_cmd_open {
+ uint32_t container_id;
+};
+
+struct dprc_cmd_get_connection {
+ uint32_t ep1_id;
+ uint16_t ep1_interface_id;
+ uint16_t pad;
+
+ uint8_t ep1_type[16];
+};
+
+struct dprc_rsp_get_connection {
+ uint64_t pad[3];
+ uint32_t ep2_id;
+ uint16_t ep2_interface_id;
+ uint16_t pad1;
+ uint8_t ep2_type[16];
+ uint32_t state;
+};
+#pragma pack(pop)
+#endif /* _FSL_DPRC_CMD_H */
diff --git a/drivers/bus/fslmc/meson.build b/drivers/bus/fslmc/meson.build
index 54be76f516..162ca286fe 100644
--- a/drivers/bus/fslmc/meson.build
+++ b/drivers/bus/fslmc/meson.build
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2018 NXP
+# Copyright 2018,2021 NXP
if not is_linux
build = false
@@ -16,10 +16,12 @@ sources = files(
'mc/dpdmai.c',
'mc/dpio.c',
'mc/dpmng.c',
+ 'mc/dprc.c',
'mc/mc_sys.c',
'portal/dpaa2_hw_dpbp.c',
'portal/dpaa2_hw_dpci.c',
'portal/dpaa2_hw_dpio.c',
+ 'portal/dpaa2_hw_dprc.c',
'qbman/qbman_portal.c',
'qbman/qbman_debug.c',
)
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dprc.c b/drivers/bus/fslmc/portal/dpaa2_hw_dprc.c
new file mode 100644
index 0000000000..ca1d0304d5
--- /dev/null
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dprc.c
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2021 NXP
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+#include <rte_dev.h>
+
+#include <fslmc_logs.h>
+#include <rte_fslmc.h>
+#include <mc/fsl_dprc.h>
+#include "portal/dpaa2_hw_pvt.h"
+
+TAILQ_HEAD(dprc_dev_list, dpaa2_dprc_dev);
+static struct dprc_dev_list dprc_dev_list
+ = TAILQ_HEAD_INITIALIZER(dprc_dev_list); /*!< DPRC device list */
+
+static int
+rte_dpaa2_create_dprc_device(int vdev_fd __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dprc_id)
+{
+ struct dpaa2_dprc_dev *dprc_node;
+ struct dprc_endpoint endpoint1, endpoint2;
+ struct rte_dpaa2_device *dev, *dev_tmp;
+ int ret;
+
+ /* Allocate DPAA2 dprc handle */
+ dprc_node = rte_malloc(NULL, sizeof(struct dpaa2_dprc_dev), 0);
+ if (!dprc_node) {
+ DPAA2_BUS_ERR("Memory allocation failed for DPRC Device");
+ return -ENOMEM;
+ }
+
+ /* Open the dprc object */
+ dprc_node->dprc.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+ dprc_node->dprc_id = dprc_id;
+ ret = dprc_open(&dprc_node->dprc,
+ CMD_PRI_LOW, dprc_id, &dprc_node->token);
+ if (ret) {
+ DPAA2_BUS_ERR("Resource alloc failure with err code: %d", ret);
+ rte_free(dprc_node);
+ return ret;
+ }
+
+ RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_tmp) {
+ if (dev->dev_type == DPAA2_ETH) {
+ int link_state;
+
+ memset(&endpoint1, 0, sizeof(struct dprc_endpoint));
+ memset(&endpoint2, 0, sizeof(struct dprc_endpoint));
+ strcpy(endpoint1.type, "dpni");
+ endpoint1.id = dev->object_id;
+ ret = dprc_get_connection(&dprc_node->dprc,
+ CMD_PRI_LOW,
+ dprc_node->token,
+ &endpoint1, &endpoint2,
+ &link_state);
+ if (ret) {
+ DPAA2_BUS_ERR("dpni.%d connection failed!",
+ dev->object_id);
+ dprc_close(&dprc_node->dprc, CMD_PRI_LOW,
+ dprc_node->token);
+ rte_free(dprc_node);
+ return ret;
+ }
+
+ if (!strcmp(endpoint2.type, "dpmac"))
+ dev->ep_dev_type = DPAA2_MAC;
+ else if (!strcmp(endpoint2.type, "dpni"))
+ dev->ep_dev_type = DPAA2_ETH;
+ else if (!strcmp(endpoint2.type, "dpdmux"))
+ dev->ep_dev_type = DPAA2_MUX;
+ else
+ dev->ep_dev_type = DPAA2_UNKNOWN;
+
+ dev->ep_object_id = endpoint2.id;
+ } else {
+ dev->ep_dev_type = DPAA2_UNKNOWN;
+ }
+ sprintf(dev->ep_name, "%s.%d", endpoint2.type, endpoint2.id);
+ }
+
+ TAILQ_INSERT_TAIL(&dprc_dev_list, dprc_node, next);
+
+ return 0;
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dprc_obj = {
+ .dev_type = DPAA2_DPRC,
+ .create = rte_dpaa2_create_dprc_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dprc, rte_dpaa2_dprc_obj);
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index b1bba1ac36..8cb4d404aa 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -187,6 +187,18 @@ struct swp_active_dqs {
extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
+/**
+ * A structure describing a DPAA2 container.
+ */
+struct dpaa2_dprc_dev {
+ TAILQ_ENTRY(dpaa2_dprc_dev) next;
+ /**< Pointer to Next device instance */
+ const char *name;
+	struct fsl_mc_io dprc; /**< Handle to DPRC portal object */
+ uint16_t token;
+	uint32_t dprc_id; /**< HW ID for DPRC object */
+};
+
struct dpaa2_dpci_dev {
TAILQ_ENTRY(dpaa2_dpci_dev) next;
/**< Pointer to Next device instance */
diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h
index 729f360646..12b586b13b 100644
--- a/drivers/bus/fslmc/rte_fslmc.h
+++ b/drivers/bus/fslmc/rte_fslmc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2016,2019 NXP
+ * Copyright 2016,2021 NXP
*
*/
@@ -37,6 +37,9 @@ extern "C" {
#include <fslmc_vfio.h>
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
#define FSLMC_OBJECT_MAX_LEN 32 /**< Length of each device on bus */
#define DPAA2_INVALID_MBUF_SEQN 0
@@ -88,6 +91,8 @@ enum rte_dpaa2_dev_type {
DPAA2_QDMA, /**< DPDMAI type device */
DPAA2_MUX, /**< DPDMUX type device */
DPAA2_DPRTC, /**< DPRTC type device */
+ DPAA2_DPRC, /**< DPRC type device */
+ DPAA2_MAC, /**< DPMAC type device */
/* Unknown device placeholder */
DPAA2_UNKNOWN,
DPAA2_DEVTYPE_MAX,
@@ -122,6 +127,9 @@ struct rte_dpaa2_device {
};
enum rte_dpaa2_dev_type dev_type; /**< Device Type */
uint16_t object_id; /**< DPAA2 Object ID */
+ enum rte_dpaa2_dev_type ep_dev_type; /**< Endpoint Device Type */
+ uint16_t ep_object_id; /**< Endpoint DPAA2 Object ID */
+ char ep_name[RTE_DEV_NAME_MAX_LEN];
struct rte_intr_handle *intr_handle; /**< Interrupt handle */
struct rte_dpaa2_driver *driver; /**< Associated driver */
char name[FSLMC_OBJECT_MAX_LEN]; /**< DPAA2 Object name*/
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 10/16] net/dpaa2: support recycle loopback port
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
` (8 preceding siblings ...)
2021-12-27 16:16 ` [PATCH v2 09/16] bus/fslmc: add and scan dprc devices nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 16:16 ` [PATCH v2 11/16] net/dpaa: check status before configuring shared MAC nipun.gupta
` (6 subsequent siblings)
16 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Jun Yang
From: Jun Yang <jun.yang@nxp.com>
DPAA2 recycle port is used for configuring the device
in the loopback mode. Loopback configuration can be at
dpni level or at serdes level.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 3 +-
drivers/net/dpaa2/dpaa2_ethdev.c | 32 +-
drivers/net/dpaa2/dpaa2_ethdev.h | 23 +
drivers/net/dpaa2/dpaa2_recycle.c | 780 ++++++++++++++++++++++++
drivers/net/dpaa2/mc/dpni.c | 32 +
drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 1 +
drivers/net/dpaa2/meson.build | 1 +
7 files changed, 870 insertions(+), 2 deletions(-)
create mode 100644 drivers/net/dpaa2/dpaa2_recycle.c
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 8cb4d404aa..4d0f7e4b5d 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2021 NXP
*
*/
@@ -176,6 +176,7 @@ struct dpaa2_queue {
uint16_t nb_desc;
uint16_t resv;
uint64_t offloads;
+ uint64_t lpbk_cntx;
} __rte_cache_aligned;
struct swp_active_dqs {
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index a45beed75f..d81f8cb07a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -668,6 +668,30 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
+ if (eth_conf->lpbk_mode) {
+ ret = dpaa2_dev_recycle_config(dev);
+ if (ret) {
+ DPAA2_PMD_ERR("Error to configure %s to recycle port.",
+ dev->data->name);
+
+ return ret;
+ }
+ } else {
+ /** User may disable loopback mode by calling
+ * "dev_configure" with lpbk_mode cleared.
+ * No matter the port was configured recycle or not,
+ * recycle de-configure is called here.
+ * If port is not recycled, the de-configure will return directly.
+ */
+ ret = dpaa2_dev_recycle_deconfig(dev);
+ if (ret) {
+ DPAA2_PMD_ERR("Error to de-configure recycle port %s.",
+ dev->data->name);
+
+ return ret;
+ }
+ }
+
dpaa2_tm_init(dev);
return 0;
@@ -2601,6 +2625,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
return -1;
}
+ if (eth_dev->data->dev_conf.lpbk_mode)
+ dpaa2_dev_recycle_deconfig(eth_dev);
+
/* Clean the device first */
ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
if (ret) {
@@ -2624,6 +2651,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
priv->dist_queues = attr.num_queues;
priv->num_channels = attr.num_channels;
priv->channel_inuse = 0;
+ rte_spinlock_init(&priv->lpbk_qp_lock);
/* only if the custom CG is enabled */
if (attr.options & DPNI_OPT_CUSTOM_CG)
@@ -2808,7 +2836,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
return ret;
}
}
- RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
+ RTE_LOG(INFO, PMD, "%s: netdev created, connected to %s\n",
+ eth_dev->data->name, dpaa2_dev->ep_name);
+
return 0;
init_err:
dpaa2_dev_close(eth_dev);
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index bd33a22a8e..b032da9eff 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -11,6 +11,7 @@
#include <rte_event_eth_rx_adapter.h>
#include <rte_pmd_dpaa2.h>
+#include <rte_fslmc.h>
#include <dpaa2_hw_pvt.h>
#include "dpaa2_tm.h"
@@ -65,6 +66,18 @@
/* Tx confirmation enabled */
#define DPAA2_TX_CONF_ENABLE 0x06
+/* HW loopback the egress traffic to self ingress*/
+#define DPAA2_TX_MAC_LOOPBACK_MODE 0x20
+
+#define DPAA2_TX_SERDES_LOOPBACK_MODE 0x40
+
+#define DPAA2_TX_DPNI_LOOPBACK_MODE 0x80
+
+#define DPAA2_TX_LOOPBACK_MODE \
+ (DPAA2_TX_MAC_LOOPBACK_MODE | \
+ DPAA2_TX_SERDES_LOOPBACK_MODE | \
+ DPAA2_TX_DPNI_LOOPBACK_MODE)
+
#define DPAA2_RSS_OFFLOAD_ALL ( \
RTE_ETH_RSS_L2_PAYLOAD | \
RTE_ETH_RSS_IP | \
@@ -192,6 +205,7 @@ struct dpaa2_dev_priv {
struct dpaa2_queue *next_tx_conf_queue;
struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
+ rte_spinlock_t lpbk_qp_lock;
uint8_t channel_inuse;
LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
@@ -268,4 +282,13 @@ int dpaa2_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
uint32_t flags __rte_unused);
int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
struct timespec *timestamp);
+
+int dpaa2_dev_recycle_config(struct rte_eth_dev *eth_dev);
+int dpaa2_dev_recycle_deconfig(struct rte_eth_dev *eth_dev);
+int dpaa2_dev_recycle_qp_setup(struct rte_dpaa2_device *dpaa2_dev,
+ uint16_t qidx, uint64_t cntx,
+ eth_rx_burst_t tx_lpbk, eth_tx_burst_t rx_lpbk,
+ struct dpaa2_queue **txq,
+ struct dpaa2_queue **rxq);
+
#endif /* _DPAA2_ETHDEV_H */
diff --git a/drivers/net/dpaa2/dpaa2_recycle.c b/drivers/net/dpaa2/dpaa2_recycle.c
new file mode 100644
index 0000000000..e274d24ead
--- /dev/null
+++ b/drivers/net/dpaa2/dpaa2_recycle.c
@@ -0,0 +1,780 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2019-2021 NXP
+ *
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_fslmc.h>
+#include <rte_flow_driver.h>
+
+#include "dpaa2_pmd_logs.h"
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_mempool.h>
+#include <dpaa2_hw_dpio.h>
+#include <mc/fsl_dpmng.h>
+#include "dpaa2_ethdev.h"
+#include "dpaa2_sparser.h"
+#include <fsl_qbman_debug.h>
+
+#include <rte_io.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#define LSX_SERDES_LAN_NB 8
+#define LSX_SERDES_REG_BASE 0x1ea0000
+#define LSX_LB_EN_BIT 0x10000000
+
+#define CONFIG_SYS_IMMR 0x01000000
+
+#define CONFIG_SYS_FSL_GUTS_ADDR (CONFIG_SYS_IMMR + 0x00E00000)
+#define CONFIG_SYS_FSL_SERDES_ADDR (CONFIG_SYS_IMMR + 0xEA0000)
+
+#define FSL_LX_SRDS1_PRTCL_SHIFT 16
+#define FSL_LX_SRDS2_PRTCL_SHIFT 21
+#define FSL_LX_SRDS3_PRTCL_SHIFT 26
+
+#define FSL_LS_SRDS1_PRTCL_SHIFT 16
+#define FSL_LS_SRDS2_PRTCL_SHIFT 0
+
+#define FSL_LX_SRDS1_REGSR 29
+#define FSL_LX_SRDS2_REGSR 29
+#define FSL_LX_SRDS3_REGSR 29
+
+#define FSL_LS_SRDS1_REGSR 29
+#define FSL_LS_SRDS2_REGSR 30
+
+#define FSL_LX_SRDS1_PRTCL_MASK 0x001F0000
+#define FSL_LX_SRDS2_PRTCL_MASK 0x03E00000
+#define FSL_LX_SRDS3_PRTCL_MASK 0x7C000000
+
+#define FSL_LS_SRDS1_PRTCL_MASK 0xFFFF0000
+#define FSL_LS_SRDS2_PRTCL_MASK 0x0000FFFF
+
+struct ccsr_lx_serdes_lan {
+ uint8_t unused1[0xa0];
+ uint32_t lnatcsr0;
+ uint8_t unused2[0x100 - 0xa4];
+} __rte_packed;
+
+struct ccsr_lx_serdes {
+ uint8_t unused0[0x800];
+ struct ccsr_lx_serdes_lan lane[LSX_SERDES_LAN_NB];
+} __rte_packed;
+
+struct ccsr_ls_serdes {
+ uint8_t unused[0x800];
+ struct serdes_lane {
+ uint32_t gcr0; /* General Control Register 0 */
+ uint32_t gcr1; /* General Control Register 1 */
+ uint32_t gcr2; /* General Control Register 2 */
+ uint32_t ssc0; /* Speed Switch Control 0 */
+ uint32_t rec0; /* Receive Equalization Control 0 */
+ uint32_t rec1; /* Receive Equalization Control 1 */
+ uint32_t tec0; /* Transmit Equalization Control 0 */
+ uint32_t ssc1; /* Speed Switch Control 1 */
+ uint32_t ttlc;
+ uint32_t rev[6];
+ uint32_t tsc3;
+ } lane[LSX_SERDES_LAN_NB];
+ uint8_t res5[0x19fc - 0xa00];
+} __rte_packed;
+
+struct ccsr_gur {
+ uint32_t porsr1; /* POR status 1 */
+ uint32_t porsr2; /* POR status 2 */
+ uint8_t res_008[0x20 - 0x8];
+ uint32_t gpporcr1; /* General-purpose POR configuration */
+ uint32_t gpporcr2; /* General-purpose POR configuration 2 */
+ uint32_t gpporcr3;
+ uint32_t gpporcr4;
+ uint8_t res_030[0x60 - 0x30];
+ uint32_t dcfg_fusesr; /* Fuse status register */
+ uint8_t res_064[0x70 - 0x64];
+ uint32_t devdisr; /* Device disable control 1 */
+ uint32_t devdisr2; /* Device disable control 2 */
+ uint32_t devdisr3; /* Device disable control 3 */
+ uint32_t devdisr4; /* Device disable control 4 */
+ uint32_t devdisr5; /* Device disable control 5 */
+ uint32_t devdisr6; /* Device disable control 6 */
+ uint8_t res_088[0x94 - 0x88];
+ uint32_t coredisr; /* Device disable control 7 */
+ uint8_t res_098[0xa0 - 0x98];
+ uint32_t pvr; /* Processor version */
+ uint32_t svr; /* System version */
+ uint8_t res_0a8[0x100 - 0xa8];
+ uint32_t rcwsr[30]; /* Reset control word status */
+
+ uint8_t res_178[0x200 - 0x178];
+ uint32_t scratchrw[16]; /* Scratch Read/Write */
+ uint8_t res_240[0x300 - 0x240];
+ uint32_t scratchw1r[4]; /* Scratch Read (Write once) */
+ uint8_t res_310[0x400 - 0x310];
+ uint32_t bootlocptrl; /* Boot location pointer low-order addr */
+ uint32_t bootlocptrh; /* Boot location pointer high-order addr */
+ uint8_t res_408[0x520 - 0x408];
+ uint32_t usb1_amqr;
+ uint32_t usb2_amqr;
+ uint8_t res_528[0x530 - 0x528]; /* add more registers when needed */
+ uint32_t sdmm1_amqr;
+ uint32_t sdmm2_amqr;
+ uint8_t res_538[0x550 - 0x538]; /* add more registers when needed */
+ uint32_t sata1_amqr;
+ uint32_t sata2_amqr;
+ uint32_t sata3_amqr;
+ uint32_t sata4_amqr;
+ uint8_t res_560[0x570 - 0x560]; /* add more registers when needed */
+ uint32_t misc1_amqr;
+ uint8_t res_574[0x590 - 0x574]; /* add more registers when needed */
+ uint32_t spare1_amqr;
+ uint32_t spare2_amqr;
+ uint32_t spare3_amqr;
+ uint8_t res_59c[0x620 - 0x59c]; /* add more registers when needed */
+ uint32_t gencr[7]; /* General Control Registers */
+ uint8_t res_63c[0x640 - 0x63c]; /* add more registers when needed */
+ uint32_t cgensr1; /* Core General Status Register */
+ uint8_t res_644[0x660 - 0x644]; /* add more registers when needed */
+ uint32_t cgencr1; /* Core General Control Register */
+ uint8_t res_664[0x740 - 0x664]; /* add more registers when needed */
+ uint32_t tp_ityp[64]; /* Topology Initiator Type Register */
+ struct {
+ uint32_t upper;
+ uint32_t lower;
+ } tp_cluster[4]; /* Core cluster n Topology Register */
+ uint8_t res_864[0x920 - 0x864]; /* add more registers when needed */
+	uint32_t ioqoscr[8]; /* I/O Quality of Service Registers */
+ uint32_t uccr;
+ uint8_t res_944[0x960 - 0x944]; /* add more registers when needed */
+ uint32_t ftmcr;
+ uint8_t res_964[0x990 - 0x964]; /* add more registers when needed */
+ uint32_t coredisablesr;
+ uint8_t res_994[0xa00 - 0x994]; /* add more registers when needed */
+	uint32_t sdbgcr; /* Secure Debug Configuration Register */
+ uint8_t res_a04[0xbf8 - 0xa04]; /* add more registers when needed */
+ uint32_t ipbrr1;
+ uint32_t ipbrr2;
+ uint8_t res_858[0x1000 - 0xc00];
+} __rte_packed;
+
+static void *lsx_ccsr_map_region(uint64_t addr, size_t len)
+{
+ int fd;
+ void *tmp;
+ uint64_t start;
+ uint64_t offset;
+
+ fd = open("/dev/mem", O_RDWR);
+ if (fd < 0) {
+ DPAA2_PMD_ERR("Fail to open /dev/mem");
+ return NULL;
+ }
+
+ start = addr & PAGE_MASK;
+ offset = addr - start;
+ len = len & PAGE_MASK;
+ if (len < (size_t)PAGE_SIZE)
+ len = PAGE_SIZE;
+
+ tmp = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, start);
+
+ close(fd);
+
+ if (tmp != MAP_FAILED)
+ return (uint8_t *)tmp + offset;
+ else
+ return NULL;
+}
+
+static const uint8_t ls_sd1_prot_idx_map[] = {
+ 0x03, 0x05, 0x07, 0x09, 0x0a, 0x0c, 0x0e,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c,
+ 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a,
+ 0x2b, 0x2d, 0x2e, 0x30, 0x32, 0x33, 0x35,
+ 0x37, 0x39, 0x3b, 0x4b, 0x4c, 0x4d, 0x58
+};
+
+static const uint8_t ls_sd2_prot_idx_map[] = {
+ 0x07, 0x09, 0x0a, 0x0c, 0x0e, 0x10, 0x12,
+ 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20,
+ 0x22, 0x24, 0x3d, 0x3f, 0x41, 0x43, 0x45,
+ 0x47, 0x49, 0x4f, 0x50, 0x51, 0x52, 0x53,
+ 0x54, 0x55, 0x56, 0x57
+};
+
+static const uint8_t ls_sd1_eth_loopback_support[][LSX_SERDES_LAN_NB] = {
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x03*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x05*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x07*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x09*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0a*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0c*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0e*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x10*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x12*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x14*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x16*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x18*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1a*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1c*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1e*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x20*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x22*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x24*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x26*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x28*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x2a*/
+
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 0x2b*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 0x2d*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 0x2e*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 0x30*/
+
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x32*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x33*/
+
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x35*/
+ {1, 1, 0, 0, 0, 0, 0, 0}, /* 0x37*/
+
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 0x39*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 0x3b*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x4b*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 0x4c*/
+ {0, 0, 1, 1, 0, 0, 1, 1}, /* 0x4d*/
+ {0, 0, 0, 0, 0, 0, 1, 1} /* 0x58*/
+};
+
+static const uint8_t ls_sd2_eth_loopback_support[][LSX_SERDES_LAN_NB] = {
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x07*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x09*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0a*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0c*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0e*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x10*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x12*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x14*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x16*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x18*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1a*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1c*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1e*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x20*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x22*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x24*/
+
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x3d*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x3f*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x41*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x43*/
+
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x45*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 0x47*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x49*/
+
+ {0, 0, 1, 1, 0, 0, 1, 1}, /* 0x4f*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x50*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x51*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x52*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 0x53*/
+ {0, 0, 1, 1, 0, 0, 1, 1}, /* 0x54*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 0x55*/
+ {0, 0, 1, 1, 0, 0, 1, 1}, /* 0x56*/
+ {0, 0, 0, 0, 0, 0, 1, 1} /* 0x57*/
+};
+
+enum lsx_serdes_id {
+ LSX_SERDES_1 = 1,
+ LSX_SERDES_2 = 2
+};
+
+static const uint8_t lx_sd1_loopback_support[][LSX_SERDES_LAN_NB] = {
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 1 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 2 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 3 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 4 prot*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 5 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 6 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 7 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 8 prot*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 9 prot*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 10 prot*/
+ {0, 0, 1, 1, 0, 0, 1, 1}, /* 11 prot*/
+ {0, 0, 0, 0, 0, 0, 1, 1}, /* 12 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 13 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 14 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 15 prot*/
+ {0, 0, 1, 1, 0, 0, 0, 0}, /* 16 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 17 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 18 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 19 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 20 prot*/
+ {1, 1, 1, 1, 0, 0, 1, 1}, /* 21 prot*/
+ {1, 1, 1, 1, 0, 0, 1, 1} /* 22 prot*/
+};
+
+static const uint8_t lx_sd2_loopback_support[][LSX_SERDES_LAN_NB] = {
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 1 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 2 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 3 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 4 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 5 prot*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 6 prot*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 7 prot*/
+ {0, 0, 0, 0, 0, 0, 1, 1}, /* 8 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 9 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 10 prot*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 11 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 12 prot*/
+ {0, 0, 0, 0, 0, 0, 1, 1}, /* 13 prot*/
+ {0, 0, 1, 1, 0, 0, 1, 1} /* 14 prot*/
+};
+
+static inline int
+ls_mac_to_serdes_id(uint8_t mac_id)
+{
+ if (mac_id >= 1 && mac_id <= 8)
+ return LSX_SERDES_1;
+ if (mac_id >= 9 && mac_id <= 16)
+ return LSX_SERDES_2;
+
+ return -1;
+}
+
+static inline int
+lx_mac_to_serdes_id(uint8_t mac_id)
+{
+ if (mac_id >= 1 && mac_id <= 10)
+ return LSX_SERDES_1;
+ if (mac_id >= 11 && mac_id <= 18)
+ return LSX_SERDES_2;
+
+ return -1;
+}
+
+static inline int
+ls_serdes_cfg_to_idx(uint8_t sd_cfg, int sd_id)
+{
+ int i;
+
+ if (sd_id == LSX_SERDES_1) {
+ for (i = 0; i < (int)sizeof(ls_sd1_prot_idx_map); i++) {
+ if (ls_sd1_prot_idx_map[i] == sd_cfg)
+ return i;
+ }
+ } else if (sd_id == LSX_SERDES_2) {
+ for (i = 0; i < (int)sizeof(ls_sd2_prot_idx_map); i++) {
+ if (ls_sd2_prot_idx_map[i] == sd_cfg)
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static inline int
+lx_serdes_cfg_to_idx(uint8_t sd_cfg, int sd_id __rte_unused)
+{
+ return sd_cfg;
+}
+
+static inline int
+ls_mac_serdes_lpbk_support(uint16_t mac_id,
+ uint16_t *serdes_id, uint16_t *lan_id)
+{
+ struct ccsr_gur *gur_base =
+ lsx_ccsr_map_region(CONFIG_SYS_FSL_GUTS_ADDR,
+ sizeof(struct ccsr_gur) / 64 * 64 + 64);
+ uint32_t sd_cfg;
+ int sd_id, sd_idx;
+ uint16_t lan_id_tmp = 0;
+ const uint8_t *ls_sd_loopback_support;
+
+ sd_id = ls_mac_to_serdes_id(mac_id);
+
+ if (sd_id == LSX_SERDES_1) {
+ sd_cfg = rte_read32(&gur_base->rcwsr[FSL_LS_SRDS1_REGSR - 1]) &
+ FSL_LS_SRDS1_PRTCL_MASK;
+ sd_cfg >>= FSL_LS_SRDS1_PRTCL_SHIFT;
+ } else if (sd_id == LSX_SERDES_2) {
+ sd_cfg = rte_read32(&gur_base->rcwsr[FSL_LS_SRDS2_REGSR - 1]) &
+ FSL_LS_SRDS2_PRTCL_MASK;
+ sd_cfg >>= FSL_LS_SRDS2_PRTCL_SHIFT;
+ } else {
+ return false;
+ }
+ sd_cfg = sd_cfg & 0xff;
+
+ sd_idx = ls_serdes_cfg_to_idx(sd_cfg, sd_id);
+ if (sd_idx < 0) {
+ DPAA2_PMD_ERR("Serdes protocol(0x%02x) does not exist\n",
+ sd_cfg);
+ return false;
+ }
+
+ if (sd_id == LSX_SERDES_1) {
+ ls_sd_loopback_support =
+ &ls_sd1_eth_loopback_support[sd_idx][0];
+ } else {
+ ls_sd_loopback_support =
+ &ls_sd2_eth_loopback_support[sd_idx][0];
+ }
+
+ if (sd_id == LSX_SERDES_1)
+ lan_id_tmp = (mac_id - 1);
+ else
+ lan_id_tmp = (mac_id - 9);
+
+ if (lan_id_tmp >= LSX_SERDES_LAN_NB) {
+ DPAA2_PMD_ERR("Invalid serdes lan(%d).", lan_id_tmp);
+ return false;
+ }
+
+ if (!ls_sd_loopback_support[lan_id_tmp])
+ return false;
+
+ if (lan_id)
+ *lan_id = lan_id_tmp;
+ if (serdes_id)
+ *serdes_id = sd_id;
+
+ return true;
+}
+
+static inline int
+lx_mac_serdes_lpbk_support(uint16_t mac_id,
+ uint16_t *serdes_id, uint16_t *lan_id)
+{
+ struct ccsr_gur *gur_base =
+ lsx_ccsr_map_region(CONFIG_SYS_FSL_GUTS_ADDR,
+ sizeof(struct ccsr_gur) / 64 * 64 + 64);
+ uint32_t sd_cfg;
+ int sd_id, sd_idx;
+ uint16_t lan_id_tmp = 0;
+ const uint8_t *lx_sd_loopback_support;
+
+ sd_id = lx_mac_to_serdes_id(mac_id);
+
+ if (sd_id == LSX_SERDES_1) {
+ sd_cfg = rte_read32(&gur_base->rcwsr[FSL_LX_SRDS1_REGSR - 1]) &
+ FSL_LX_SRDS1_PRTCL_MASK;
+ sd_cfg >>= FSL_LX_SRDS1_PRTCL_SHIFT;
+ } else if (sd_id == LSX_SERDES_2) {
+ sd_cfg = rte_read32(&gur_base->rcwsr[FSL_LX_SRDS2_REGSR - 1]) &
+ FSL_LX_SRDS2_PRTCL_MASK;
+ sd_cfg >>= FSL_LX_SRDS2_PRTCL_SHIFT;
+ } else {
+ return false;
+ }
+ sd_cfg = sd_cfg & 0xff;
+
+ sd_idx = lx_serdes_cfg_to_idx(sd_cfg, sd_id);
+ if (sd_idx < 0)
+ return false;
+
+ if (sd_id == LSX_SERDES_1)
+ lx_sd_loopback_support = &lx_sd1_loopback_support[sd_idx][0];
+ else
+ lx_sd_loopback_support = &lx_sd2_loopback_support[sd_idx][0];
+
+ if (sd_id == LSX_SERDES_1) {
+ if (mac_id == 1)
+ lan_id_tmp = 0;
+ else if (mac_id == 2)
+ lan_id_tmp = 4;
+ else
+ lan_id_tmp = (mac_id - 3);
+ } else {
+ if (mac_id == 11)
+ lan_id_tmp = 0;
+ else if (mac_id == 12)
+ lan_id_tmp = 1;
+ else if (mac_id == 13)
+ lan_id_tmp = 6;
+ else if (mac_id == 14)
+ lan_id_tmp = 7;
+ else if (mac_id == 15)
+ lan_id_tmp = 4;
+ else if (mac_id == 16)
+ lan_id_tmp = 5;
+ else if (mac_id == 17)
+ lan_id_tmp = 2;
+ else if (mac_id == 18)
+ lan_id_tmp = 3;
+ else
+ return false;
+ }
+
+ if (lan_id_tmp >= LSX_SERDES_LAN_NB)
+ return false;
+
+ if (!lx_sd_loopback_support[lan_id_tmp])
+ return false;
+
+ if (lan_id)
+ *lan_id = lan_id_tmp;
+ if (serdes_id)
+ *serdes_id = sd_id;
+
+ return true;
+}
+
+static inline int
+ls_serdes_eth_lpbk(uint16_t mac_id, int en)
+{
+ uint16_t serdes_id, lan_id;
+ int ret;
+ uint32_t data;
+ struct ccsr_ls_serdes *serdes_base;
+ void *reg = 0;
+
+ ret = ls_mac_serdes_lpbk_support(mac_id, &serdes_id, &lan_id);
+ if (!ret)
+ return -ENOTSUP;
+
+ serdes_base = lsx_ccsr_map_region(CONFIG_SYS_FSL_SERDES_ADDR +
+ (serdes_id - LSX_SERDES_1) * 0x10000,
+ sizeof(struct ccsr_ls_serdes) / 64 * 64 + 64);
+ if (!serdes_base) {
+ DPAA2_PMD_ERR("Serdes register map failed\n");
+ return -ENOMEM;
+ }
+
+ if (serdes_id == LSX_SERDES_1)
+ lan_id = LSX_SERDES_LAN_NB - lan_id - 1;
+
+ reg = &serdes_base->lane[lan_id].tsc3;
+
+ data = rte_read32(reg);
+ if (en)
+ rte_write32(data | LSX_LB_EN_BIT, reg);
+ else
+ rte_write32(data & (~LSX_LB_EN_BIT), reg);
+
+ return 0;
+}
+
+static inline int
+lx_serdes_eth_lpbk(uint16_t mac_id, int en)
+{
+ uint16_t serdes_id = 0xffff, lan_id = 0xffff;
+ int ret;
+ uint32_t data;
+ struct ccsr_lx_serdes *serdes_base;
+ void *reg = 0;
+
+ ret = lx_mac_serdes_lpbk_support(mac_id, &serdes_id, &lan_id);
+ if (!ret)
+ return -ENOTSUP;
+
+ serdes_base = lsx_ccsr_map_region(CONFIG_SYS_FSL_SERDES_ADDR +
+ (serdes_id - LSX_SERDES_1) * 0x10000,
+ sizeof(struct ccsr_lx_serdes) / 64 * 64 + 64);
+ if (!serdes_base) {
+ DPAA2_PMD_ERR("Serdes register map failed\n");
+ return -ENOMEM;
+ }
+
+ if (serdes_id == LSX_SERDES_1)
+ lan_id = LSX_SERDES_LAN_NB - lan_id - 1;
+
+ reg = &serdes_base->lane[lan_id].lnatcsr0;
+
+ data = rte_read32(reg);
+ if (en)
+ rte_write32(data | LSX_LB_EN_BIT, reg);
+ else
+ rte_write32(data & (~LSX_LB_EN_BIT), reg);
+
+ return 0;
+}
+
+/* Configure dpaa2 port as recycle port */
+int
+dpaa2_dev_recycle_config(struct rte_eth_dev *eth_dev)
+{
+ struct rte_device *dev = eth_dev->device;
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct rte_dpaa2_device *dpaa2_dev =
+ container_of(dev, struct rte_dpaa2_device, device);
+ struct fsl_mc_io *dpni_dev = eth_dev->process_private;
+ struct dpni_port_cfg port_cfg;
+ int ret;
+
+ if (priv->flags & DPAA2_TX_LOOPBACK_MODE) {
+ DPAA2_PMD_INFO("%s has been configured recycle device.",
+ eth_dev->data->name);
+
+ return 0;
+ }
+
+ if (dpaa2_dev->ep_dev_type == DPAA2_MAC) {
+ /** For dpmac-dpni connection,
+ * try setting serdes loopback as recycle device at first.
+ */
+ if (dpaa2_svr_family == SVR_LS2088A) {
+ ret = ls_serdes_eth_lpbk(dpaa2_dev->ep_object_id, 1);
+ if (!ret) {
+ priv->flags |= DPAA2_TX_SERDES_LOOPBACK_MODE;
+ return 0;
+ }
+ } else if (dpaa2_svr_family == SVR_LX2160A) {
+ ret = lx_serdes_eth_lpbk(dpaa2_dev->ep_object_id, 1);
+ if (!ret) {
+ priv->flags |= DPAA2_TX_SERDES_LOOPBACK_MODE;
+ return 0;
+ }
+ } else {
+ DPAA2_PMD_DEBUG("Serdes loopback not support SoC(0x%08x)",
+ dpaa2_svr_family);
+ }
+
+ /** If serdes loopback is not supported for this mac,
+ * trying set mac loopback.
+ */
+
+ port_cfg.loopback_en = 1;
+ ret = dpni_set_port_cfg(dpni_dev, CMD_PRI_LOW,
+ priv->token,
+ DPNI_PORT_CFG_LOOPBACK,
+ &port_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error(%d) to enable loopback", ret);
+ return -ENOTSUP;
+ }
+
+ priv->flags |= DPAA2_TX_MAC_LOOPBACK_MODE;
+
+ return 0;
+ }
+
+ if (dpaa2_dev->ep_dev_type == DPAA2_ETH &&
+ dpaa2_dev->object_id == dpaa2_dev->ep_object_id) {
+ priv->flags |= DPAA2_TX_DPNI_LOOPBACK_MODE;
+
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+int
+dpaa2_dev_recycle_deconfig(struct rte_eth_dev *eth_dev)
+{
+ struct rte_device *dev = eth_dev->device;
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct rte_dpaa2_device *dpaa2_dev =
+ container_of(dev, struct rte_dpaa2_device, device);
+ struct fsl_mc_io *dpni_dev = eth_dev->process_private;
+ struct dpni_port_cfg port_cfg;
+ int ret = 0;
+
+ if (!(priv->flags & DPAA2_TX_LOOPBACK_MODE))
+ return 0;
+
+ if (priv->flags & DPAA2_TX_SERDES_LOOPBACK_MODE) {
+ if (dpaa2_svr_family == SVR_LS2088A) {
+ ret = ls_serdes_eth_lpbk(dpaa2_dev->ep_object_id, 0);
+ if (ret) {
+ DPAA2_PMD_WARN("Error(%d) to disable Serdes loopback",
+ ret);
+ } else {
+ priv->flags &= ~DPAA2_TX_SERDES_LOOPBACK_MODE;
+ }
+ } else if (dpaa2_svr_family == SVR_LX2160A) {
+ ret = lx_serdes_eth_lpbk(dpaa2_dev->ep_object_id, 0);
+ if (ret) {
+ DPAA2_PMD_WARN("Error(%d) to disable Serdes loopback",
+ ret);
+ } else {
+ priv->flags &= ~DPAA2_TX_SERDES_LOOPBACK_MODE;
+ }
+ } else {
+ DPAA2_PMD_DEBUG("Serdes loopback not support SoC(0x%08x)",
+ dpaa2_svr_family);
+ }
+ }
+
+ if (priv->flags & DPAA2_TX_MAC_LOOPBACK_MODE) {
+ port_cfg.loopback_en = 0;
+ ret = dpni_set_port_cfg(dpni_dev, CMD_PRI_LOW,
+ priv->token,
+ DPNI_PORT_CFG_LOOPBACK,
+ &port_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error(%d) to disable TX mac loopback",
+ ret);
+ } else {
+ priv->flags &= ~DPAA2_TX_MAC_LOOPBACK_MODE;
+ }
+ }
+
+ if (priv->flags & DPAA2_TX_DPNI_LOOPBACK_MODE)
+ priv->flags &= ~DPAA2_TX_DPNI_LOOPBACK_MODE;
+
+ return ret;
+}
+
+int
+dpaa2_dev_recycle_qp_setup(struct rte_dpaa2_device *dpaa2_dev,
+ uint16_t qidx, uint64_t cntx,
+ eth_rx_burst_t tx_lpbk, eth_tx_burst_t rx_lpbk,
+ struct dpaa2_queue **txq,
+ struct dpaa2_queue **rxq)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_data *data;
+ struct dpaa2_queue *txq_tmp;
+ struct dpaa2_queue *rxq_tmp;
+ struct dpaa2_dev_priv *priv;
+
+ dev = dpaa2_dev->eth_dev;
+ data = dev->data;
+ priv = data->dev_private;
+
+ if (!(priv->flags & DPAA2_TX_LOOPBACK_MODE) &&
+ (tx_lpbk || rx_lpbk)) {
+ DPAA2_PMD_ERR("%s is NOT recycle device!", data->name);
+
+ return -EINVAL;
+ }
+
+ if (qidx >= data->nb_rx_queues || qidx >= data->nb_tx_queues)
+ return -EINVAL;
+
+ rte_spinlock_lock(&priv->lpbk_qp_lock);
+
+ if (tx_lpbk)
+ dev->tx_pkt_burst = tx_lpbk;
+
+ if (rx_lpbk)
+ dev->rx_pkt_burst = rx_lpbk;
+
+ txq_tmp = data->tx_queues[qidx];
+ txq_tmp->lpbk_cntx = cntx;
+ rxq_tmp = data->rx_queues[qidx];
+ rxq_tmp->lpbk_cntx = cntx;
+
+ if (txq)
+ *txq = txq_tmp;
+ if (rxq)
+ *rxq = rxq_tmp;
+
+ rte_spinlock_unlock(&priv->lpbk_qp_lock);
+
+ return 0;
+}
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index b7a65cb637..7a2bc15eb4 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -3087,3 +3087,35 @@ int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
return err;
}
+/**
+ * dpni_set_port_cfg() - performs configurations at physical port connected on
+ * this dpni. The command have effect only if dpni is connected to
+ * another dpni object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @flags: Valid fields from port_cfg structure
+ * @port_cfg: Configuration data; one or more of DPNI_PORT_CFG_
+ * The command can be called only when dpni is connected to a dpmac object. If
+ * the dpni is unconnected or the endpoint is not a dpni it will return error.
+ * If dpmac endpoint is disconnected the settings will be lost
+ */
+int dpni_set_port_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, uint32_t flags, struct dpni_port_cfg *port_cfg)
+{
+ struct dpni_cmd_set_port_cfg *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PORT_CFG,
+ cmd_flags, token);
+
+ cmd_params = (struct dpni_cmd_set_port_cfg *)cmd.params;
+ cmd_params->flags = cpu_to_le32(flags);
+ dpni_set_field(cmd_params->bit_params, PORT_LOOPBACK_EN,
+ !!port_cfg->loopback_en);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index ed0bd7615a..b7bd7556af 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -119,6 +119,7 @@
#define DPNI_CMDID_REMOVE_CUSTOM_TPID DPNI_CMD(0x276)
#define DPNI_CMDID_GET_CUSTOM_TPID DPNI_CMD(0x277)
#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
+#define DPNI_CMDID_SET_PORT_CFG DPNI_CMD(0x27B)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
diff --git a/drivers/net/dpaa2/meson.build b/drivers/net/dpaa2/meson.build
index 21b827a259..51598c048c 100644
--- a/drivers/net/dpaa2/meson.build
+++ b/drivers/net/dpaa2/meson.build
@@ -14,6 +14,7 @@ sources = files(
'dpaa2_mux.c',
'dpaa2_ethdev.c',
'dpaa2_flow.c',
+ 'dpaa2_recycle.c',
'dpaa2_rxtx.c',
'dpaa2_sparser.c',
'dpaa2_ptp.c',
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 11/16] net/dpaa: check status before configuring shared MAC
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
` (9 preceding siblings ...)
2021-12-27 16:16 ` [PATCH v2 10/16] net/dpaa2: support recycle loopback port nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 16:16 ` [PATCH v2 12/16] net/dpaa: enable checksum for shared MAC interface nipun.gupta
` (5 subsequent siblings)
16 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Nipun Gupta
From: Nipun Gupta <nipun.gupta@nxp.com>
For a shared MAC interface, it is a prerequisite to enable the
interface in the kernel before using it in user-space. This
patch makes sure that the device does not get configured in
case the shared MAC interface is not enabled in the kernel.
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/bus/dpaa/base/fman/fman_hw.c | 11 +++++++++++
drivers/bus/dpaa/include/fsl_fman.h | 2 ++
drivers/bus/dpaa/version.map | 1 +
drivers/net/dpaa/dpaa_ethdev.c | 13 ++++++++++++-
4 files changed, 26 insertions(+), 1 deletion(-)
diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index af9bac76c2..24a99f7235 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -314,6 +314,17 @@ fman_if_disable_rx(struct fman_if *p)
out_be32(__if->ccsr_map + 8, in_be32(__if->ccsr_map + 8) & ~(u32)2);
}
+int
+fman_if_get_rx_status(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* return true if RX bit is set */
+ return !!(in_be32(__if->ccsr_map + 8) & (u32)2);
+}
+
void
fman_if_loopback_enable(struct fman_if *p)
{
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
index f3a5d05970..acb344584f 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -81,6 +81,8 @@ __rte_internal
void fman_if_enable_rx(struct fman_if *p);
__rte_internal
void fman_if_disable_rx(struct fman_if *p);
+__rte_internal
+int fman_if_get_rx_status(struct fman_if *p);
/* Enable/disable loopback on specific interfaces */
__rte_internal
diff --git a/drivers/bus/dpaa/version.map b/drivers/bus/dpaa/version.map
index 900635b210..1a840fd1a5 100644
--- a/drivers/bus/dpaa/version.map
+++ b/drivers/bus/dpaa/version.map
@@ -33,6 +33,7 @@ INTERNAL {
fman_if_get_fdoff;
fman_if_get_maxfrm;
fman_if_get_sg_enable;
+ fman_if_get_rx_status;
fman_if_loopback_disable;
fman_if_loopback_enable;
fman_if_promiscuous_disable;
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index e49f765434..3972ecaed8 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -195,6 +195,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
uint64_t rx_offloads = eth_conf->rxmode.offloads;
uint64_t tx_offloads = eth_conf->txmode.offloads;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
struct rte_device *rdev = dev->device;
struct rte_eth_link *link = &dev->data->dev_link;
struct rte_dpaa_device *dpaa_dev;
@@ -203,7 +204,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
struct rte_intr_handle *intr_handle;
uint32_t max_rx_pktlen;
int speed, duplex;
- int ret;
+ int ret, rx_status;
PMD_INIT_FUNC_TRACE();
@@ -211,6 +212,16 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
intr_handle = dpaa_dev->intr_handle;
__fif = container_of(fif, struct __fman_if, __if);
+ /* Check if interface is enabled in case of shared MAC */
+ if (fif->is_shared_mac) {
+ rx_status = fman_if_get_rx_status(fif);
+ if (!rx_status) {
+ DPAA_PMD_ERR("%s Interface not enabled in kernel!",
+ dpaa_intf->name);
+ return -EHOSTDOWN;
+ }
+ }
+
/* Rx offloads which are enabled by default */
if (dev_rx_offloads_nodis & ~rx_offloads) {
DPAA_PMD_INFO(
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 12/16] net/dpaa: enable checksum for shared MAC interface
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
` (10 preceding siblings ...)
2021-12-27 16:16 ` [PATCH v2 11/16] net/dpaa: check status before configuring shared MAC nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 16:16 ` [PATCH v2 13/16] net/enetc: add support for VFs nipun.gupta
` (4 subsequent siblings)
16 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Brick Yang, Nipun Gupta
From: Nipun Gupta <nipun.gupta@nxp.com>
In case of shared MAC, the B0V bit in contextA is required
to be set so that the ASPID is 0.
Signed-off-by: Brick Yang <brick.yang@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/net/dpaa/dpaa_ethdev.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 3972ecaed8..7135a5998d 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -1755,6 +1755,10 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
/* no tx-confirmation */
opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
+ if (fman_ip_rev >= FMAN_V3) {
+ /* Set B0V bit in contextA to set ASPID to 0 */
+ opts.fqd.context_a.hi |= 0x04000000;
+ }
DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
if (cgr_tx) {
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 13/16] net/enetc: add support for VFs
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
` (11 preceding siblings ...)
2021-12-27 16:16 ` [PATCH v2 12/16] net/dpaa: enable checksum for shared MAC interface nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 16:16 ` [PATCH v2 14/16] net/pfe: disable HW CRC stripping nipun.gupta
` (3 subsequent siblings)
16 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Gagandeep Singh
From: Gagandeep Singh <g.singh@nxp.com>
Add virtual function support for enetc devices
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/net/enetc/enetc_ethdev.c | 25 ++++++++++++++++++++-----
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/drivers/net/enetc/enetc_ethdev.c b/drivers/net/enetc/enetc_ethdev.c
index 7cdb8ce463..1b4337bc48 100644
--- a/drivers/net/enetc/enetc_ethdev.c
+++ b/drivers/net/enetc/enetc_ethdev.c
@@ -19,6 +19,9 @@ enetc_dev_start(struct rte_eth_dev *dev)
uint32_t val;
PMD_INIT_FUNC_TRACE();
+ if (hw->device_id == ENETC_DEV_ID_VF)
+ return 0;
+
val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
@@ -55,6 +58,9 @@ enetc_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
dev->data->dev_started = 0;
+ if (hw->device_id == ENETC_DEV_ID_VF)
+ return 0;
+
/* Disable port */
val = enetc_port_rd(enetc_hw, ENETC_PMR);
enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));
@@ -160,11 +166,20 @@ enetc_hardware_init(struct enetc_eth_hw *hw)
/* Enabling Station Interface */
enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);
- *mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
- high_mac = (uint32_t)*mac;
- mac++;
- *mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
- low_mac = (uint16_t)*mac;
+
+ if (hw->device_id == ENETC_DEV_ID_VF) {
+ *mac = (uint32_t)enetc_rd(enetc_hw, ENETC_SIPMAR0);
+ high_mac = (uint32_t)*mac;
+ mac++;
+ *mac = (uint32_t)enetc_rd(enetc_hw, ENETC_SIPMAR1);
+ low_mac = (uint16_t)*mac;
+ } else {
+ *mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
+ high_mac = (uint32_t)*mac;
+ mac++;
+ *mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
+ low_mac = (uint16_t)*mac;
+ }
if ((high_mac | low_mac) == 0) {
char *first_byte;
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 14/16] net/pfe: disable HW CRC stripping
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
` (12 preceding siblings ...)
2021-12-27 16:16 ` [PATCH v2 13/16] net/enetc: add support for VFs nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 17:49 ` Stephen Hemminger
2021-12-27 16:16 ` [PATCH v2 15/16] net/pfe: reduce driver initialization time nipun.gupta
` (2 subsequent siblings)
16 siblings, 1 reply; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Gagandeep Singh
From: Gagandeep Singh <g.singh@nxp.com>
LS1012A MAC PCS block has an erratum that is seen with specific
PHY AR803x. The issue is triggered by the (spec-compliant)
operation of the AR803x PHY on the LS1012A-FRWY board.
Due to this, a good FCS packet is reported as an error packet by the MAC,
so for these error packets the FCS should be validated and only
real error packets discarded in the PFE engine Rx packet path. From now
onwards CRC validation will be handled in pfe.ko, and the DPDK driver
cannot use the CRC forwarding option.
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/net/pfe/pfe_ethdev.c | 7 +++++--
drivers/net/pfe/pfe_hal.c | 4 ++--
2 files changed, 7 insertions(+), 4 deletions(-)
diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
index 047010e15e..bfcaf51dd9 100644
--- a/drivers/net/pfe/pfe_ethdev.c
+++ b/drivers/net/pfe/pfe_ethdev.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2020 NXP
*/
#include <sys/ioctl.h>
@@ -422,8 +422,11 @@ pfe_eth_close(struct rte_eth_dev *dev)
}
static int
-pfe_eth_configure(struct rte_eth_dev *dev __rte_unused)
+pfe_eth_configure(struct rte_eth_dev *dev)
{
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ PFE_PMD_ERR("PMD does not support KEEP_CRC offload");
+
return 0;
}
diff --git a/drivers/net/pfe/pfe_hal.c b/drivers/net/pfe/pfe_hal.c
index 41d783dbff..f49d1728b2 100644
--- a/drivers/net/pfe/pfe_hal.c
+++ b/drivers/net/pfe/pfe_hal.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2020 NXP
*/
#include <arpa/inet.h>
@@ -191,7 +191,7 @@ gemac_set_mode(void *base, __rte_unused int mode)
val &= ~EMAC_RCNTRL_LOOP;
/*Enable flow control and MII mode*/
- val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE | EMAC_RCNTRL_CRC_FWD);
+ val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE);
writel(val, base + EMAC_RCNTRL_REG);
}
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* Re: [PATCH v2 14/16] net/pfe: disable HW CRC stripping
2021-12-27 16:16 ` [PATCH v2 14/16] net/pfe: disable HW CRC stripping nipun.gupta
@ 2021-12-27 17:49 ` Stephen Hemminger
2022-01-03 6:09 ` Nipun Gupta
0 siblings, 1 reply; 68+ messages in thread
From: Stephen Hemminger @ 2021-12-27 17:49 UTC (permalink / raw)
To: nipun.gupta; +Cc: dev, thomas, ferruh.yigit, hemant.agrawal, Gagandeep Singh
On Mon, 27 Dec 2021 21:46:43 +0530
nipun.gupta@nxp.com wrote:
> @@ -422,8 +422,11 @@ pfe_eth_close(struct rte_eth_dev *dev)
> }
>
> static int
> -pfe_eth_configure(struct rte_eth_dev *dev __rte_unused)
> +pfe_eth_configure(struct rte_eth_dev *dev)
> {
> + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
> + PFE_PMD_ERR("PMD does not support KEEP_CRC offload");
> +
> return 0;
> }
>
Why is this necessary? There is already a check for offload capabilities in
rte_eth_dev_configure(). The device should report correct supported offloads instead.
^ permalink raw reply [flat|nested] 68+ messages in thread
* RE: [PATCH v2 14/16] net/pfe: disable HW CRC stripping
2021-12-27 17:49 ` Stephen Hemminger
@ 2022-01-03 6:09 ` Nipun Gupta
0 siblings, 0 replies; 68+ messages in thread
From: Nipun Gupta @ 2022-01-03 6:09 UTC (permalink / raw)
To: Stephen Hemminger
Cc: dev, thomas, ferruh.yigit, Hemant Agrawal, Gagandeep Singh
> -----Original Message-----
> From: Stephen Hemminger <stephen@networkplumber.org>
> Sent: 27 December 2021 23:19
> To: Nipun Gupta <nipun.gupta@nxp.com>
> Cc: dev@dpdk.org; thomas@monjalon.net; ferruh.yigit@intel.com; Hemant
> Agrawal <hemant.agrawal@nxp.com>; Gagandeep Singh <G.Singh@nxp.com>
> Subject: Re: [PATCH v2 14/16] net/pfe: disable HW CRC stripping
>
> On Mon, 27 Dec 2021 21:46:43 +0530
> nipun.gupta@nxp.com wrote:
>
> > @@ -422,8 +422,11 @@ pfe_eth_close(struct rte_eth_dev *dev)
> > }
> >
> > static int
> > -pfe_eth_configure(struct rte_eth_dev *dev __rte_unused)
> > +pfe_eth_configure(struct rte_eth_dev *dev)
> > {
> > + if (dev->data->dev_conf.rxmode.offloads &
> DEV_RX_OFFLOAD_KEEP_CRC)
> > + PFE_PMD_ERR("PMD does not support KEEP_CRC offload");
> > +
> > return 0;
> > }
> >
>
> Why is this necessary? There is already a check for offload capabilities in
> rte_eth_dev_configure(). The device should report correct supported offloads
> instead.
Agree. Will remove this patch.
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 15/16] net/pfe: reduce driver initialization time
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
` (13 preceding siblings ...)
2021-12-27 16:16 ` [PATCH v2 14/16] net/pfe: disable HW CRC stripping nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 17:57 ` Stephen Hemminger
2021-12-27 16:16 ` [PATCH v2 16/16] net/pfe: remove setting unused value nipun.gupta
2021-12-27 17:50 ` [PATCH v2 00/16] features and fixes on NXP eth devices Stephen Hemminger
16 siblings, 1 reply; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Gagandeep Singh
From: Gagandeep Singh <g.singh@nxp.com>
This patch reduces the delay in the device init.
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/net/pfe/pfe_hif.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/net/pfe/pfe_hif.c b/drivers/net/pfe/pfe_hif.c
index c4a7154ba7..eade726b2e 100644
--- a/drivers/net/pfe/pfe_hif.c
+++ b/drivers/net/pfe/pfe_hif.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2020 NXP
*/
#include "pfe_logs.h"
@@ -9,6 +9,8 @@
#include <sys/eventfd.h>
#include <arpa/inet.h>
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define msleep(x) rte_delay_us(1000 * (x))
static int
pfe_hif_alloc_descr(struct pfe_hif *hif)
{
@@ -766,7 +768,7 @@ pfe_hif_rx_idle(struct pfe_hif *hif)
if (rx_status & BDP_CSR_RX_DMA_ACTV)
send_dummy_pkt_to_hif();
- sleep(1);
+ msleep(DIV_ROUND_UP(100, 1000));
} while (--hif_stop_loop);
if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* Re: [PATCH v2 15/16] net/pfe: reduce driver initialization time
2021-12-27 16:16 ` [PATCH v2 15/16] net/pfe: reduce driver initialization time nipun.gupta
@ 2021-12-27 17:57 ` Stephen Hemminger
2022-01-03 5:45 ` Nipun Gupta
0 siblings, 1 reply; 68+ messages in thread
From: Stephen Hemminger @ 2021-12-27 17:57 UTC (permalink / raw)
To: nipun.gupta; +Cc: dev, thomas, ferruh.yigit, hemant.agrawal, Gagandeep Singh
On Mon, 27 Dec 2021 21:46:44 +0530
nipun.gupta@nxp.com wrote:
> From: Gagandeep Singh <g.singh@nxp.com>
>
> This patch reduces the delay in the device init.
>
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> ---
> drivers/net/pfe/pfe_hif.c | 6 ++++--
> 1 file changed, 4 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/pfe/pfe_hif.c b/drivers/net/pfe/pfe_hif.c
> index c4a7154ba7..eade726b2e 100644
> --- a/drivers/net/pfe/pfe_hif.c
> +++ b/drivers/net/pfe/pfe_hif.c
> @@ -1,5 +1,5 @@
> /* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright 2018-2019 NXP
> + * Copyright 2018-2020 NXP
> */
>
> #include "pfe_logs.h"
> @@ -9,6 +9,8 @@
> #include <sys/eventfd.h>
> #include <arpa/inet.h>
>
> +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
> +#define msleep(x) rte_delay_us(1000 * (x))
> static int
> pfe_hif_alloc_descr(struct pfe_hif *hif)
> {
> @@ -766,7 +768,7 @@ pfe_hif_rx_idle(struct pfe_hif *hif)
> if (rx_status & BDP_CSR_RX_DMA_ACTV)
> send_dummy_pkt_to_hif();
>
> - sleep(1);
> + msleep(DIV_ROUND_UP(100, 1000));
Why not just
rte_delay_ms(1);
^ permalink raw reply [flat|nested] 68+ messages in thread
* RE: [PATCH v2 15/16] net/pfe: reduce driver initialization time
2021-12-27 17:57 ` Stephen Hemminger
@ 2022-01-03 5:45 ` Nipun Gupta
0 siblings, 0 replies; 68+ messages in thread
From: Nipun Gupta @ 2022-01-03 5:45 UTC (permalink / raw)
To: Stephen Hemminger
Cc: dev, thomas, ferruh.yigit, Hemant Agrawal, Gagandeep Singh
> -----Original Message-----
> From: Stephen Hemminger <stephen@networkplumber.org>
> Sent: 27 December 2021 23:28
> To: Nipun Gupta <nipun.gupta@nxp.com>
> Cc: dev@dpdk.org; thomas@monjalon.net; ferruh.yigit@intel.com; Hemant
> Agrawal <hemant.agrawal@nxp.com>; Gagandeep Singh <G.Singh@nxp.com>
> Subject: Re: [PATCH v2 15/16] net/pfe: reduce driver initialization time
>
> On Mon, 27 Dec 2021 21:46:44 +0530
> nipun.gupta@nxp.com wrote:
>
> > From: Gagandeep Singh <g.singh@nxp.com>
> >
> > This patch reduces the delay in the device init.
> >
> > Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> > ---
> > drivers/net/pfe/pfe_hif.c | 6 ++++--
> > 1 file changed, 4 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/pfe/pfe_hif.c b/drivers/net/pfe/pfe_hif.c
> > index c4a7154ba7..eade726b2e 100644
> > --- a/drivers/net/pfe/pfe_hif.c
> > +++ b/drivers/net/pfe/pfe_hif.c
> > @@ -1,5 +1,5 @@
> > /* SPDX-License-Identifier: BSD-3-Clause
> > - * Copyright 2018-2019 NXP
> > + * Copyright 2018-2020 NXP
> > */
> >
> > #include "pfe_logs.h"
> > @@ -9,6 +9,8 @@
> > #include <sys/eventfd.h>
> > #include <arpa/inet.h>
> >
> > +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
> > +#define msleep(x) rte_delay_us(1000 * (x))
> > static int
> > pfe_hif_alloc_descr(struct pfe_hif *hif)
> > {
> > @@ -766,7 +768,7 @@ pfe_hif_rx_idle(struct pfe_hif *hif)
> > if (rx_status & BDP_CSR_RX_DMA_ACTV)
> > send_dummy_pkt_to_hif();
> >
> > - sleep(1);
> > + msleep(DIV_ROUND_UP(100, 1000));
>
> Why not just
> rte_delay_ms(1);
Will update.
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v2 16/16] net/pfe: remove setting unused value
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
` (14 preceding siblings ...)
2021-12-27 16:16 ` [PATCH v2 15/16] net/pfe: reduce driver initialization time nipun.gupta
@ 2021-12-27 16:16 ` nipun.gupta
2021-12-27 17:50 ` [PATCH v2 00/16] features and fixes on NXP eth devices Stephen Hemminger
16 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2021-12-27 16:16 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, Apeksha Gupta
From: Apeksha Gupta <apeksha.gupta@nxp.com>
remove setting link status where it is not being used
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
---
drivers/net/pfe/pfe_ethdev.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
index bfcaf51dd9..5a3008cbb5 100644
--- a/drivers/net/pfe/pfe_ethdev.c
+++ b/drivers/net/pfe/pfe_ethdev.c
@@ -590,8 +590,7 @@ pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus);
if (ret != 0) {
PFE_PMD_ERR("Unable to fetch link status (ioctl)\n");
- /* use dummy link value */
- link.link_status = 1;
+ return -1;
}
PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.\n",
lstatus, priv->id);
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* Re: [PATCH v2 00/16] features and fixes on NXP eth devices
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
` (15 preceding siblings ...)
2021-12-27 16:16 ` [PATCH v2 16/16] net/pfe: remove setting unused value nipun.gupta
@ 2021-12-27 17:50 ` Stephen Hemminger
2022-01-03 5:45 ` Nipun Gupta
16 siblings, 1 reply; 68+ messages in thread
From: Stephen Hemminger @ 2021-12-27 17:50 UTC (permalink / raw)
To: nipun.gupta; +Cc: dev, thomas, ferruh.yigit, hemant.agrawal
On Mon, 27 Dec 2021 21:46:29 +0530
nipun.gupta@nxp.com wrote:
> From: Nipun Gupta <nipun.gupta@nxp.com>
>
> This series adds few features and important fixes on DPAA,
> PFE and ENETC devices.
>
> Features added:
> - level 2 support for shaping on DPAA2
> - loopback configuration for DPNI devices on DPAA2
> - Multiple TXQ's enqueue for ordered queues for performance
> - VFs support on ENETC
>
> Fixes:
> - fix unregistering interrupt handler on DPAA2
> - fix timestamping for IEEE1588 on DPAA1
>
> Changes in v2:
> - fix checkpatch errors
>
> Apeksha Gupta (1):
> net/pfe: remove setting unused value
>
> Gagandeep Singh (4):
> net/dpaa2: add support for level 2 in traffic management
> net/enetc: add support for VFs
> net/pfe: disable HW CRC stripping
> net/pfe: reduce driver initialization time
>
> Jun Yang (4):
> net/dpaa2: support multiple txqs en-queue for ordered
> net/dpaa2: secondary process handling for dpni
> bus/fslmc: add and scan dprc devices
> net/dpaa2: support recycle loopback port
>
> Nipun Gupta (4):
> bus/fslmc: update MC to 10.29
> bus/fslmc: use dmb oshst for synchronization before I/O
> net/dpaa: check status before configuring shared MAC
> net/dpaa: enable checksum for shared MAC interface
>
> Rohit Raj (1):
> net/dpaa2: warn user in case of high nb desc
>
> Vanshika Shukla (2):
> net/dpaa2: fix unregistering interrupt handler
> net/dpaa2: fix timestamping for IEEE1588
>
> doc/guides/nics/dpaa2.rst | 2 +-
> drivers/bus/dpaa/base/fman/fman_hw.c | 11 +
> drivers/bus/dpaa/include/fsl_fman.h | 2 +
> drivers/bus/dpaa/version.map | 1 +
> drivers/bus/fslmc/fslmc_bus.c | 15 +-
> drivers/bus/fslmc/fslmc_vfio.c | 18 +-
> drivers/bus/fslmc/mc/dprc.c | 129 ++++
> drivers/bus/fslmc/mc/fsl_dpmng.h | 2 +-
> drivers/bus/fslmc/mc/fsl_dprc.h | 46 ++
> drivers/bus/fslmc/mc/fsl_dprc_cmd.h | 48 ++
> drivers/bus/fslmc/meson.build | 4 +-
> drivers/bus/fslmc/portal/dpaa2_hw_dprc.c | 100 +++
> drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 15 +-
> drivers/bus/fslmc/qbman/include/compat.h | 4 +-
> drivers/bus/fslmc/rte_fslmc.h | 10 +-
> drivers/event/dpaa2/dpaa2_eventdev.c | 12 +-
> drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 23 +
> drivers/mempool/dpaa2/rte_dpaa2_mempool.h | 15 +
> drivers/mempool/dpaa2/version.map | 1 +
> drivers/net/dpaa/dpaa_ethdev.c | 17 +-
> drivers/net/dpaa2/base/dpaa2_hw_dpni.c | 5 +-
> drivers/net/dpaa2/dpaa2_ethdev.c | 117 +++-
> drivers/net/dpaa2/dpaa2_ethdev.h | 38 +-
> drivers/net/dpaa2/dpaa2_ptp.c | 8 +-
> drivers/net/dpaa2/dpaa2_recycle.c | 780 ++++++++++++++++++++++
> drivers/net/dpaa2/dpaa2_rxtx.c | 181 ++++-
> drivers/net/dpaa2/dpaa2_tm.c | 563 +++++++++++++---
> drivers/net/dpaa2/dpaa2_tm.h | 17 +-
> drivers/net/dpaa2/mc/dpdmux.c | 8 +
> drivers/net/dpaa2/mc/dpkg.c | 7 +-
> drivers/net/dpaa2/mc/dpni.c | 417 ++++++++----
> drivers/net/dpaa2/mc/fsl_dpdmux.h | 3 +
> drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h | 5 +-
> drivers/net/dpaa2/mc/fsl_dpni.h | 173 +++--
> drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 137 ++--
> drivers/net/dpaa2/meson.build | 1 +
> drivers/net/dpaa2/version.map | 1 +
> drivers/net/enetc/enetc_ethdev.c | 25 +-
> drivers/net/pfe/pfe_ethdev.c | 10 +-
> drivers/net/pfe/pfe_hal.c | 4 +-
> drivers/net/pfe/pfe_hif.c | 6 +-
> 41 files changed, 2528 insertions(+), 453 deletions(-)
> create mode 100644 drivers/bus/fslmc/mc/dprc.c
> create mode 100644 drivers/bus/fslmc/mc/fsl_dprc.h
> create mode 100644 drivers/bus/fslmc/mc/fsl_dprc_cmd.h
> create mode 100644 drivers/bus/fslmc/portal/dpaa2_hw_dprc.c
> create mode 100644 drivers/net/dpaa2/dpaa2_recycle.c
>
In general best to split the patch series by device not by vendor.
That way it is easier to review and changes to one device can go in
if there are still issues with other devices
^ permalink raw reply [flat|nested] 68+ messages in thread
* RE: [PATCH v2 00/16] features and fixes on NXP eth devices
2021-12-27 17:50 ` [PATCH v2 00/16] features and fixes on NXP eth devices Stephen Hemminger
@ 2022-01-03 5:45 ` Nipun Gupta
0 siblings, 0 replies; 68+ messages in thread
From: Nipun Gupta @ 2022-01-03 5:45 UTC (permalink / raw)
To: Stephen Hemminger; +Cc: dev, thomas, ferruh.yigit, Hemant Agrawal
> -----Original Message-----
> From: Stephen Hemminger <stephen@networkplumber.org>
> Sent: 27 December 2021 23:21
> To: Nipun Gupta <nipun.gupta@nxp.com>
> Cc: dev@dpdk.org; thomas@monjalon.net; ferruh.yigit@intel.com; Hemant
> Agrawal <hemant.agrawal@nxp.com>
> Subject: Re: [PATCH v2 00/16] features and fixes on NXP eth devices
>
> On Mon, 27 Dec 2021 21:46:29 +0530
> nipun.gupta@nxp.com wrote:
>
> > From: Nipun Gupta <nipun.gupta@nxp.com>
> >
> > This series adds few features and important fixes on DPAA,
> > PFE and ENETC devices.
> >
> > Features added:
> > - level 2 support for shaping on DPAA2
> > - loopback configuration for DPNI devices on DPAA2
> > - Multiple TXQ's enqueue for ordered queues for performance
> > - VFs support on ENETC
> >
> > Fixes:
> > - fix unregistering interrupt handler on DPAA2
> > - fix timestamping for IEEE1588 on DPAA1
> >
> > Changes in v2:
> > - fix checkpatch errors
> >
> > Apeksha Gupta (1):
> > net/pfe: remove setting unused value
> >
> > Gagandeep Singh (4):
> > net/dpaa2: add support for level 2 in traffic management
> > net/enetc: add support for VFs
> > net/pfe: disable HW CRC stripping
> > net/pfe: reduce driver initialization time
> >
> > Jun Yang (4):
> > net/dpaa2: support multiple txqs en-queue for ordered
> > net/dpaa2: secondary process handling for dpni
> > bus/fslmc: add and scan dprc devices
> > net/dpaa2: support recycle loopback port
> >
> > Nipun Gupta (4):
> > bus/fslmc: update MC to 10.29
> > bus/fslmc: use dmb oshst for synchronization before I/O
> > net/dpaa: check status before configuring shared MAC
> > net/dpaa: enable checksum for shared MAC interface
> >
> > Rohit Raj (1):
> > net/dpaa2: warn user in case of high nb desc
> >
> > Vanshika Shukla (2):
> > net/dpaa2: fix unregistering interrupt handler
> > net/dpaa2: fix timestamping for IEEE1588
> >
> > doc/guides/nics/dpaa2.rst | 2 +-
> > drivers/bus/dpaa/base/fman/fman_hw.c | 11 +
> > drivers/bus/dpaa/include/fsl_fman.h | 2 +
> > drivers/bus/dpaa/version.map | 1 +
> > drivers/bus/fslmc/fslmc_bus.c | 15 +-
> > drivers/bus/fslmc/fslmc_vfio.c | 18 +-
> > drivers/bus/fslmc/mc/dprc.c | 129 ++++
> > drivers/bus/fslmc/mc/fsl_dpmng.h | 2 +-
> > drivers/bus/fslmc/mc/fsl_dprc.h | 46 ++
> > drivers/bus/fslmc/mc/fsl_dprc_cmd.h | 48 ++
> > drivers/bus/fslmc/meson.build | 4 +-
> > drivers/bus/fslmc/portal/dpaa2_hw_dprc.c | 100 +++
> > drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 15 +-
> > drivers/bus/fslmc/qbman/include/compat.h | 4 +-
> > drivers/bus/fslmc/rte_fslmc.h | 10 +-
> > drivers/event/dpaa2/dpaa2_eventdev.c | 12 +-
> > drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 23 +
> > drivers/mempool/dpaa2/rte_dpaa2_mempool.h | 15 +
> > drivers/mempool/dpaa2/version.map | 1 +
> > drivers/net/dpaa/dpaa_ethdev.c | 17 +-
> > drivers/net/dpaa2/base/dpaa2_hw_dpni.c | 5 +-
> > drivers/net/dpaa2/dpaa2_ethdev.c | 117 +++-
> > drivers/net/dpaa2/dpaa2_ethdev.h | 38 +-
> > drivers/net/dpaa2/dpaa2_ptp.c | 8 +-
> > drivers/net/dpaa2/dpaa2_recycle.c | 780 ++++++++++++++++++++++
> > drivers/net/dpaa2/dpaa2_rxtx.c | 181 ++++-
> > drivers/net/dpaa2/dpaa2_tm.c | 563 +++++++++++++---
> > drivers/net/dpaa2/dpaa2_tm.h | 17 +-
> > drivers/net/dpaa2/mc/dpdmux.c | 8 +
> > drivers/net/dpaa2/mc/dpkg.c | 7 +-
> > drivers/net/dpaa2/mc/dpni.c | 417 ++++++++----
> > drivers/net/dpaa2/mc/fsl_dpdmux.h | 3 +
> > drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h | 5 +-
> > drivers/net/dpaa2/mc/fsl_dpni.h | 173 +++--
> > drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 137 ++--
> > drivers/net/dpaa2/meson.build | 1 +
> > drivers/net/dpaa2/version.map | 1 +
> > drivers/net/enetc/enetc_ethdev.c | 25 +-
> > drivers/net/pfe/pfe_ethdev.c | 10 +-
> > drivers/net/pfe/pfe_hal.c | 4 +-
> > drivers/net/pfe/pfe_hif.c | 6 +-
> > 41 files changed, 2528 insertions(+), 453 deletions(-)
> > create mode 100644 drivers/bus/fslmc/mc/dprc.c
> > create mode 100644 drivers/bus/fslmc/mc/fsl_dprc.h
> > create mode 100644 drivers/bus/fslmc/mc/fsl_dprc_cmd.h
> > create mode 100644 drivers/bus/fslmc/portal/dpaa2_hw_dprc.c
> > create mode 100644 drivers/net/dpaa2/dpaa2_recycle.c
> >
>
> In general best to split the patch series by device not by vendor.
> That way it is easier to review and changes to one device can go in
> if there are still issues with other devices
Agree!! Will send them separately from next time. This time though I
would like to send them in this series only, as otherwise it would
break the versioning.
Thanks,
Nipun
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 00/15] features and fixes on NXP eth devices
2021-12-06 12:18 [PATCH 00/17] features and fixes on NXP eth devices nipun.gupta
` (17 preceding siblings ...)
2021-12-27 16:16 ` [PATCH v2 00/16] features and fixes on NXP eth devices nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-01-03 10:01 ` [PATCH v3 01/15] bus/fslmc: update MC to 10.29 nipun.gupta
` (15 more replies)
18 siblings, 16 replies; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stephen
From: Nipun Gupta <nipun.gupta@nxp.com>
This series adds few features and important fixes on DPAA,
PFE and ENETC devices.
Features added:
- level 2 support for shaping on DPAA2
- loopback configuration for DPNI devices on DPAA2
- Multiple TXQs enqueue for ordered queues for performance
- VFs support on ENETC
Fixes:
- fix unregistering interrupt handler on DPAA2
- fix timestamping for IEEE1588 on DPAA1
Changes in v2:
- fix checkpatch errors
Changes in v3:
- remove unrequired PFE HW checksum patch
- use predefined API for adding delay
- use macro value for allocating mbuf in event
Apeksha Gupta (1):
net/pfe: remove setting unused value
Gagandeep Singh (3):
net/dpaa2: add support for level 2 in traffic management
net/enetc: add support for VFs
net/pfe: reduce driver initialization time
Jun Yang (4):
net/dpaa2: support multiple txqs en-queue for ordered
net/dpaa2: secondary process handling for dpni
bus/fslmc: add and scan dprc devices
net/dpaa2: support recycle loopback port
Nipun Gupta (4):
bus/fslmc: update MC to 10.29
bus/fslmc: use dmb oshst for synchronization before I/O
net/dpaa: check status before configuring shared MAC
net/dpaa: enable checksum for shared MAC interface
Rohit Raj (1):
net/dpaa2: warn user in case of high nb desc
Vanshika Shukla (2):
net/dpaa2: fix unregistering interrupt handler
net/dpaa2: fix timestamping for IEEE1588
doc/guides/nics/dpaa2.rst | 2 +-
drivers/bus/dpaa/base/fman/fman_hw.c | 11 +
drivers/bus/dpaa/include/fsl_fman.h | 2 +
drivers/bus/dpaa/version.map | 1 +
drivers/bus/fslmc/fslmc_bus.c | 15 +-
drivers/bus/fslmc/fslmc_vfio.c | 18 +-
drivers/bus/fslmc/mc/dprc.c | 129 ++++
drivers/bus/fslmc/mc/fsl_dpmng.h | 2 +-
drivers/bus/fslmc/mc/fsl_dprc.h | 46 ++
drivers/bus/fslmc/mc/fsl_dprc_cmd.h | 48 ++
drivers/bus/fslmc/meson.build | 4 +-
drivers/bus/fslmc/portal/dpaa2_hw_dprc.c | 100 +++
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 15 +-
drivers/bus/fslmc/qbman/include/compat.h | 4 +-
drivers/bus/fslmc/rte_fslmc.h | 10 +-
drivers/event/dpaa2/dpaa2_eventdev.c | 12 +-
drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 23 +
drivers/mempool/dpaa2/rte_dpaa2_mempool.h | 15 +
drivers/mempool/dpaa2/version.map | 1 +
drivers/net/dpaa/dpaa_ethdev.c | 17 +-
drivers/net/dpaa2/base/dpaa2_hw_dpni.c | 5 +-
drivers/net/dpaa2/dpaa2_ethdev.c | 117 +++-
drivers/net/dpaa2/dpaa2_ethdev.h | 38 +-
drivers/net/dpaa2/dpaa2_ptp.c | 8 +-
drivers/net/dpaa2/dpaa2_recycle.c | 780 ++++++++++++++++++++++
drivers/net/dpaa2/dpaa2_rxtx.c | 181 ++++-
drivers/net/dpaa2/dpaa2_tm.c | 563 +++++++++++++---
drivers/net/dpaa2/dpaa2_tm.h | 17 +-
drivers/net/dpaa2/mc/dpdmux.c | 8 +
drivers/net/dpaa2/mc/dpkg.c | 7 +-
drivers/net/dpaa2/mc/dpni.c | 417 ++++++++----
drivers/net/dpaa2/mc/fsl_dpdmux.h | 3 +
drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h | 5 +-
drivers/net/dpaa2/mc/fsl_dpni.h | 173 +++--
drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 137 ++--
drivers/net/dpaa2/meson.build | 1 +
drivers/net/dpaa2/version.map | 1 +
drivers/net/enetc/enetc_ethdev.c | 25 +-
drivers/net/pfe/pfe_ethdev.c | 3 +-
drivers/net/pfe/pfe_hif.c | 4 +-
40 files changed, 2519 insertions(+), 449 deletions(-)
create mode 100644 drivers/bus/fslmc/mc/dprc.c
create mode 100644 drivers/bus/fslmc/mc/fsl_dprc.h
create mode 100644 drivers/bus/fslmc/mc/fsl_dprc_cmd.h
create mode 100644 drivers/bus/fslmc/portal/dpaa2_hw_dprc.c
create mode 100644 drivers/net/dpaa2/dpaa2_recycle.c
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 01/15] bus/fslmc: update MC to 10.29
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-01-03 10:01 ` [PATCH v3 02/15] bus/fslmc: use dmb oshst for synchronization before I/O nipun.gupta
` (14 subsequent siblings)
15 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev
Cc: thomas, ferruh.yigit, hemant.agrawal, stephen, Nipun Gupta,
Gagandeep Singh
From: Nipun Gupta <nipun.gupta@nxp.com>
update MC firmware library version to 10.29
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/bus/fslmc/mc/fsl_dpmng.h | 2 +-
drivers/net/dpaa2/mc/dpdmux.c | 8 ++
drivers/net/dpaa2/mc/dpkg.c | 7 +-
drivers/net/dpaa2/mc/dpni.c | 111 ++++++++++++++++++++------
drivers/net/dpaa2/mc/fsl_dpdmux.h | 3 +
drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h | 5 +-
drivers/net/dpaa2/mc/fsl_dpni.h | 54 ++++++++++---
drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 57 +++++++------
8 files changed, 181 insertions(+), 66 deletions(-)
diff --git a/drivers/bus/fslmc/mc/fsl_dpmng.h b/drivers/bus/fslmc/mc/fsl_dpmng.h
index 7e9bd96429..073d47efbf 100644
--- a/drivers/bus/fslmc/mc/fsl_dpmng.h
+++ b/drivers/bus/fslmc/mc/fsl_dpmng.h
@@ -20,7 +20,7 @@ struct fsl_mc_io;
* Management Complex firmware version information
*/
#define MC_VER_MAJOR 10
-#define MC_VER_MINOR 28
+#define MC_VER_MINOR 29
/**
* struct mc_version
diff --git a/drivers/net/dpaa2/mc/dpdmux.c b/drivers/net/dpaa2/mc/dpdmux.c
index edbb01b45b..1bb153cad7 100644
--- a/drivers/net/dpaa2/mc/dpdmux.c
+++ b/drivers/net/dpaa2/mc/dpdmux.c
@@ -398,6 +398,9 @@ int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
attr->mem_size = le16_to_cpu(rsp_params->mem_size);
attr->default_if = le16_to_cpu(rsp_params->default_if);
+ attr->max_dmat_entries = le16_to_cpu(rsp_params->max_dmat_entries);
+ attr->max_mc_groups = le16_to_cpu(rsp_params->max_mc_groups);
+ attr->max_vlan_ids = le16_to_cpu(rsp_params->max_vlan_ids);
return 0;
}
@@ -470,6 +473,11 @@ int dpdmux_if_disable(struct fsl_mc_io *mc_io,
* will be updated with the minimum value of the mfls of the connected
* dpnis and the actual value of dmux mfl.
*
+ * If dpdmux object is created using DPDMUX_OPT_AUTO_MAX_FRAME_LEN and maximum
+ * frame length is changed for a dpni connected to dpdmux interface the change
+ * is propagated through dpdmux interfaces and will overwrite the value set using
+ * this API.
+ *
* Return: '0' on Success; Error code otherwise.
*/
int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
diff --git a/drivers/net/dpaa2/mc/dpkg.c b/drivers/net/dpaa2/mc/dpkg.c
index 1e171eedc7..4789976b7d 100644
--- a/drivers/net/dpaa2/mc/dpkg.c
+++ b/drivers/net/dpaa2/mc/dpkg.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
*
- * Copyright 2017 NXP
+ * Copyright 2017-2021 NXP
*
*/
#include <fsl_mc_sys.h>
@@ -63,10 +63,7 @@ dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf)
dpkg_set_field(extr->extract_type, EXTRACT_TYPE,
cfg->extracts[i].type);
- if (extr->num_of_byte_masks > DPKG_NUM_OF_MASKS)
- return -EINVAL;
-
- for (j = 0; j < extr->num_of_byte_masks; j++) {
+ for (j = 0; j < DPKG_NUM_OF_MASKS; j++) {
extr->masks[j].mask = cfg->extracts[i].masks[j].mask;
extr->masks[j].offset =
cfg->extracts[i].masks[j].offset;
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index 60048d6c43..cf78295d90 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -128,6 +128,7 @@ int dpni_create(struct fsl_mc_io *mc_io,
cmd_params->num_cgs = cfg->num_cgs;
cmd_params->num_opr = cfg->num_opr;
cmd_params->dist_key_size = cfg->dist_key_size;
+ cmd_params->num_channels = cfg->num_channels;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -203,7 +204,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_io,
cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
cmd_params->num_dpbp = cfg->num_dpbp;
cmd_params->pool_options = cfg->pool_options;
- for (i = 0; i < cmd_params->num_dpbp; i++) {
+ for (i = 0; i < DPNI_MAX_DPBP; i++) {
cmd_params->pool[i].dpbp_id =
cpu_to_le16(cfg->pools[i].dpbp_id);
cmd_params->pool[i].priority_mask =
@@ -592,6 +593,7 @@ int dpni_get_attributes(struct fsl_mc_io *mc_io,
attr->num_tx_tcs = rsp_params->num_tx_tcs;
attr->mac_filter_entries = rsp_params->mac_filter_entries;
attr->vlan_filter_entries = rsp_params->vlan_filter_entries;
+ attr->num_channels = rsp_params->num_channels;
attr->qos_entries = rsp_params->qos_entries;
attr->fs_entries = le16_to_cpu(rsp_params->fs_entries);
attr->qos_key_size = rsp_params->qos_key_size;
@@ -815,6 +817,9 @@ int dpni_get_offload(struct fsl_mc_io *mc_io,
* in all enqueue operations
*
* Return: '0' on Success; Error code otherwise.
+ *
+ * If dpni object is created using multiple Tc channels this function will return
+ * qdid value for the first channel
*/
int dpni_get_qdid(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
@@ -958,7 +963,12 @@ int dpni_get_link_state(struct fsl_mc_io *mc_io,
* @token: Token of DPNI object
* @tx_cr_shaper: TX committed rate shaping configuration
* @tx_er_shaper: TX excess rate shaping configuration
- * @coupled: Committed and excess rate shapers are coupled
+ * @param: Special parameters
+ * bit0: Committed and excess rates are coupled
+ * bit1: 1 modify LNI shaper, 0 modify channel shaper
+ * bit8-15: Tx channel to be shaped. Used only if bit1 is set to zero
+ * bits16-26: OAL (Overhead accounting length 11bit value). Used only
+ * when bit1 is set.
*
* Return: '0' on Success; Error code otherwise.
*/
@@ -967,10 +977,13 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
uint16_t token,
const struct dpni_tx_shaping_cfg *tx_cr_shaper,
const struct dpni_tx_shaping_cfg *tx_er_shaper,
- int coupled)
+ uint32_t param)
{
struct dpni_cmd_set_tx_shaping *cmd_params;
struct mc_command cmd = { 0 };
+ int coupled, lni_shaper;
+ uint8_t channel_id;
+ uint16_t oal;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
@@ -985,7 +998,18 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
cpu_to_le32(tx_cr_shaper->rate_limit);
cmd_params->tx_er_rate_limit =
cpu_to_le32(tx_er_shaper->rate_limit);
- dpni_set_field(cmd_params->coupled, COUPLED, coupled);
+
+ coupled = !!(param & 0x01);
+ dpni_set_field(cmd_params->options, COUPLED, coupled);
+
+ lni_shaper = !!((param >> 1) & 0x01);
+ dpni_set_field(cmd_params->options, LNI_SHAPER, lni_shaper);
+
+ channel_id = (param >> 8) & 0xff;
+ cmd_params->channel_id = channel_id;
+
+ oal = (param >> 16) & 0x7FF;
+ cmd_params->oal = cpu_to_le16(oal);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -1543,6 +1567,7 @@ int dpni_set_tx_priorities(struct fsl_mc_io *mc_io,
cmd_flags,
token);
cmd_params = (struct dpni_cmd_set_tx_priorities *)cmd.params;
+ cmd_params->channel_idx = cfg->channel_idx;
dpni_set_field(cmd_params->flags,
SEPARATE_GRP,
cfg->separate_groups);
@@ -2053,7 +2078,13 @@ void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg,
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @qtype: Type of queue - only Rx and Tx types are supported
- * @tc_id: Traffic class selection (0-7)
+ * @param: Traffic class and channel ID.
+ * MSB - channel id; used only for DPNI_QUEUE_TX and DPNI_QUEUE_TX_CONFIRM,
+ * ignored for the rest
+ * LSB - traffic class
+ * Use macro DPNI_BUILD_PARAM() to build correct value.
+ * If dpni uses a single channel (uses only channel zero) the parameter can receive
+ * traffic class directly.
* @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled
* with the early-drop configuration by calling dpni_prepare_early_drop()
*
@@ -2066,7 +2097,7 @@ int dpni_set_early_drop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
+ uint16_t param,
uint64_t early_drop_iova)
{
struct dpni_cmd_early_drop *cmd_params;
@@ -2078,7 +2109,8 @@ int dpni_set_early_drop(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_early_drop *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc_id;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
/* send command to mc*/
@@ -2091,7 +2123,13 @@ int dpni_set_early_drop(struct fsl_mc_io *mc_io,
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @qtype: Type of queue - only Rx and Tx types are supported
- * @tc_id: Traffic class selection (0-7)
+ * @param: Traffic class and channel ID.
+ * MSB - channel id; used only for DPNI_QUEUE_TX and DPNI_QUEUE_TX_CONFIRM,
+ * ignored for the rest
+ * LSB - traffic class
+ * Use macro DPNI_BUILD_PARAM() to build correct value.
+ * If dpni uses a single channel (uses only channel zero) the parameter can receive
+ * traffic class directly.
* @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory
*
* warning: After calling this function, call dpni_extract_early_drop() to
@@ -2103,7 +2141,7 @@ int dpni_get_early_drop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
+ uint16_t param,
uint64_t early_drop_iova)
{
struct dpni_cmd_early_drop *cmd_params;
@@ -2115,7 +2153,8 @@ int dpni_get_early_drop(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_early_drop *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc_id;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->early_drop_iova = cpu_to_le64(early_drop_iova);
/* send command to mc*/
@@ -2138,8 +2177,8 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
- const struct dpni_congestion_notification_cfg *cfg)
+ uint16_t param,
+ const struct dpni_congestion_notification_cfg *cfg)
{
struct dpni_cmd_set_congestion_notification *cmd_params;
struct mc_command cmd = { 0 };
@@ -2151,7 +2190,8 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc_id;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->congestion_point = cfg->cg_point;
cmd_params->cgid = (uint8_t)cfg->cgid;
cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
@@ -2179,7 +2219,8 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @qtype: Type of queue - Rx, Tx and Tx confirm types are supported
- * @tc_id: Traffic class selection (0-7)
+ * @param: Traffic class and channel. Bits[0-7] contain traaffic class,
+ * byte[8-15] contains channel id
* @cfg: congestion notification configuration
*
* Return: '0' on Success; error code otherwise.
@@ -2188,8 +2229,8 @@ int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
- struct dpni_congestion_notification_cfg *cfg)
+ uint16_t param,
+ struct dpni_congestion_notification_cfg *cfg)
{
struct dpni_rsp_get_congestion_notification *rsp_params;
struct dpni_cmd_get_congestion_notification *cmd_params;
@@ -2203,7 +2244,8 @@ int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc_id;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->congestion_point = cfg->cg_point;
cmd_params->cgid = cfg->cgid;
@@ -2280,7 +2322,7 @@ int dpni_set_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc,
+ uint16_t param,
uint8_t index,
uint8_t options,
const struct dpni_queue *queue)
@@ -2294,7 +2336,8 @@ int dpni_set_queue(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_set_queue *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->index = index;
cmd_params->options = options;
cmd_params->dest_id = cpu_to_le32(queue->destination.id);
@@ -2317,7 +2360,13 @@ int dpni_set_queue(struct fsl_mc_io *mc_io,
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPNI object
* @qtype: Type of queue - all queue types are supported
- * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @param: Traffic class and channel ID.
+ * MSB - channel id; used only for DPNI_QUEUE_TX and DPNI_QUEUE_TX_CONFIRM,
+ * ignored for the rest
+ * LSB - traffic class
+ * Use macro DPNI_BUILD_PARAM() to build correct value.
+ * If dpni uses a single channel (uses only channel zero) the parameter can receive
+ * traffic class directly.
* @index: Selects the specific queue out of the set allocated for the
* same TC. Value must be in range 0 to NUM_QUEUES - 1
* @queue: Queue configuration structure
@@ -2329,7 +2378,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc,
+ uint16_t param,
uint8_t index,
struct dpni_queue *queue,
struct dpni_queue_id *qid)
@@ -2345,8 +2394,9 @@ int dpni_get_queue(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_get_queue *)cmd.params;
cmd_params->qtype = qtype;
- cmd_params->tc = tc;
+ cmd_params->tc = (uint8_t)(param & 0xff);
cmd_params->index = index;
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
/* send command to mc */
err = mc_send_command(mc_io, &cmd);
@@ -2382,8 +2432,16 @@ int dpni_get_queue(struct fsl_mc_io *mc_io,
* @token: Token of DPNI object
* @page: Selects the statistics page to retrieve, see
* DPNI_GET_STATISTICS output. Pages are numbered 0 to 6.
- * @param: Custom parameter for some pages used to select
- * a certain statistic source, for example the TC.
+ * @param: Custom parameter for some pages used to select
+ * a certain statistic source, for example the TC.
+ * - page_0: not used
+ * - page_1: not used
+ * - page_2: not used
+ * - page_3: high_byte - channel_id, low_byte - traffic class
+ * - page_4: high_byte - queue_index have meaning only if dpni is
+ * created using option DPNI_OPT_CUSTOM_CG, low_byte - traffic class
+ * - page_5: not used
+ * - page_6: not used
* @stat: Structure containing the statistics
*
* Return: '0' on Success; Error code otherwise.
@@ -2471,7 +2529,7 @@ int dpni_set_taildrop(struct fsl_mc_io *mc_io,
uint16_t token,
enum dpni_congestion_point cg_point,
enum dpni_queue_type qtype,
- uint8_t tc,
+ uint16_t param,
uint8_t index,
struct dpni_taildrop *taildrop)
{
@@ -2485,7 +2543,8 @@ int dpni_set_taildrop(struct fsl_mc_io *mc_io,
cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params;
cmd_params->congestion_point = cg_point;
cmd_params->qtype = qtype;
- cmd_params->tc = tc;
+ cmd_params->tc = (uint8_t)(param & 0xff);
+ cmd_params->channel_id = (uint8_t)((param >> 8) & 0xff);
cmd_params->index = index;
cmd_params->units = taildrop->units;
cmd_params->threshold = cpu_to_le32(taildrop->threshold);
diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux.h b/drivers/net/dpaa2/mc/fsl_dpdmux.h
index b01a98eb59..4600ea94d4 100644
--- a/drivers/net/dpaa2/mc/fsl_dpdmux.h
+++ b/drivers/net/dpaa2/mc/fsl_dpdmux.h
@@ -184,6 +184,9 @@ struct dpdmux_attr {
uint16_t num_ifs;
uint16_t mem_size;
uint16_t default_if;
+ uint16_t max_dmat_entries;
+ uint16_t max_mc_groups;
+ uint16_t max_vlan_ids;
};
int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
diff --git a/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h
index f8a1b5b1ae..bf6b8a20d1 100644
--- a/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h
@@ -35,7 +35,7 @@
#define DPDMUX_CMDID_ENABLE DPDMUX_CMD(0x002)
#define DPDMUX_CMDID_DISABLE DPDMUX_CMD(0x003)
-#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD_V2(0x004)
+#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD_V3(0x004)
#define DPDMUX_CMDID_RESET DPDMUX_CMD(0x005)
#define DPDMUX_CMDID_IS_ENABLED DPDMUX_CMD(0x006)
#define DPDMUX_CMDID_SET_MAX_FRAME_LENGTH DPDMUX_CMD(0x0a1)
@@ -119,6 +119,9 @@ struct dpdmux_rsp_get_attr {
uint32_t pad2;
uint64_t options;
+ uint16_t max_dmat_entries;
+ uint16_t max_mc_groups;
+ uint16_t max_vlan_ids;
};
struct dpdmux_cmd_set_max_frame_length {
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
index 469ab9b3d4..8aead28261 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -36,6 +36,10 @@ struct fsl_mc_io;
* Maximum number of storage-profiles per DPNI
*/
#define DPNI_MAX_SP 2
+/**
+ * Maximum number of Tx channels per DPNI
+ */
+#define DPNI_MAX_CHANNELS 16
/**
* All traffic classes considered; see dpni_set_queue()
@@ -117,6 +121,13 @@ struct fsl_mc_io;
*/
#define DPNI_SW_SEQUENCE_LAYOUT_SIZE 33
+/**
+ * Build a parameter from dpni channel and trafiic class. This parameter
+ * will be used to configure / query information from dpni objects created
+ * to support multiple channels.
+ */
+#define DPNI_BUILD_PARAM(channel, tc_id) (((channel) << 8) | (tc_id))
+
int dpni_open(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
int dpni_id,
@@ -187,6 +198,8 @@ int dpni_close(struct fsl_mc_io *mc_io,
* field is ignored if the DPNI has a single TC. Otherwise,
* a value of 0 defaults to 64. Maximum supported value
* is 64.
+ * @num_channels: Number of egress channels used by this dpni object. If
+ * set to zero the dpni object will use a single CEETM channel.
*/
struct dpni_cfg {
uint32_t options;
@@ -200,6 +213,7 @@ struct dpni_cfg {
uint8_t num_cgs;
uint16_t num_opr;
uint8_t dist_key_size;
+ uint8_t num_channels;
};
int dpni_create(struct fsl_mc_io *mc_io,
@@ -362,6 +376,7 @@ struct dpni_attr {
uint8_t fs_key_size;
uint16_t wriop_version;
uint8_t num_cgs;
+ uint8_t num_channels;
};
int dpni_get_attributes(struct fsl_mc_io *mc_io,
@@ -779,12 +794,29 @@ struct dpni_tx_shaping_cfg {
uint16_t max_burst_size;
};
+/**
+ * Build the parameter for dpni_set_tx_shaping() call
+ * @oal: Overhead accounting length. 11bit value added to the size of
+ * each frame. Used only for LNI shaping. If set to zero, will use default
+ * value of 24. Ignored if shaping_lni is set to zero.
+ * @shaping_lni: 1 for LNI shaping (configure whole throughput of the dpni object)
+ * 0 for channel shaping (configure shaping for individual channels)
+ * Set to one only if dpni is connected to a dpmac object.
+ * @channel_id: Channel to be configured. Ignored shaping_lni is set to 1
+ * @coupled: Committed and excess rates are coupled
+ */
+#define DPNI_TX_SHAPING_PARAM(oal, shaping_lni, channel_id, coupled) ( \
+ ((uint32_t)(((oal) & 0x7ff) << 16)) | \
+ ((uint32_t)((channel_id) & 0xff) << 8) | \
+ ((uint32_t)(!!shaping_lni) << 1) | \
+ ((uint32_t)!!coupled))
+
int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
const struct dpni_tx_shaping_cfg *tx_cr_shaper,
const struct dpni_tx_shaping_cfg *tx_er_shaper,
- int coupled);
+ uint32_t param);
int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
@@ -918,12 +950,14 @@ struct dpni_tx_schedule_cfg {
/**
* struct dpni_tx_priorities_cfg - Structure representing transmission
* priorities for DPNI TCs
+ * @channel_idx: channel to perform the configuration
* @tc_sched: An array of traffic-classes
* @prio_group_A: Priority of group A
* @prio_group_B: Priority of group B
* @separate_groups: Treat A and B groups as separate
*/
struct dpni_tx_priorities_cfg {
+ uint8_t channel_idx;
struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC];
uint32_t prio_group_A;
uint32_t prio_group_B;
@@ -1155,14 +1189,14 @@ int dpni_set_early_drop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
+ uint16_t param,
uint64_t early_drop_iova);
int dpni_get_early_drop(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
+ uint16_t param,
uint64_t early_drop_iova);
/**
@@ -1290,15 +1324,15 @@ int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
- const struct dpni_congestion_notification_cfg *cfg);
+ uint16_t param,
+ const struct dpni_congestion_notification_cfg *cfg);
int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc_id,
- struct dpni_congestion_notification_cfg *cfg);
+ uint16_t param,
+ struct dpni_congestion_notification_cfg *cfg);
/* DPNI FLC stash options */
@@ -1590,7 +1624,7 @@ int dpni_set_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc,
+ uint16_t param,
uint8_t index,
uint8_t options,
const struct dpni_queue *queue);
@@ -1599,7 +1633,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
enum dpni_queue_type qtype,
- uint8_t tc,
+ uint16_t param,
uint8_t index,
struct dpni_queue *queue,
struct dpni_queue_id *qid);
@@ -1643,7 +1677,7 @@ int dpni_set_taildrop(struct fsl_mc_io *mc_io,
uint16_t token,
enum dpni_congestion_point cg_point,
enum dpni_queue_type q_type,
- uint8_t tc,
+ uint16_t param,
uint8_t q_index,
struct dpni_taildrop *taildrop);
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index 6fbd93bb38..8bff2ec9af 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -8,14 +8,15 @@
#define _FSL_DPNI_CMD_H
/* DPNI Version */
-#define DPNI_VER_MAJOR 7
-#define DPNI_VER_MINOR 17
+#define DPNI_VER_MAJOR 8
+#define DPNI_VER_MINOR 0
#define DPNI_CMD_BASE_VERSION 1
#define DPNI_CMD_VERSION_2 2
#define DPNI_CMD_VERSION_3 3
#define DPNI_CMD_VERSION_4 4
#define DPNI_CMD_VERSION_5 5
+#define DPNI_CMD_VERSION_6 6
#define DPNI_CMD_ID_OFFSET 4
#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
@@ -23,17 +24,18 @@
#define DPNI_CMD_V3(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_3)
#define DPNI_CMD_V4(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_4)
#define DPNI_CMD_V5(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_5)
+#define DPNI_CMD_V6(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_6)
/* Command IDs */
#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
-#define DPNI_CMDID_CREATE DPNI_CMD_V5(0x901)
+#define DPNI_CMDID_CREATE DPNI_CMD_V6(0x901)
#define DPNI_CMDID_DESTROY DPNI_CMD(0x981)
#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01)
#define DPNI_CMDID_ENABLE DPNI_CMD(0x002)
#define DPNI_CMDID_DISABLE DPNI_CMD(0x003)
-#define DPNI_CMDID_GET_ATTR DPNI_CMD_V3(0x004)
+#define DPNI_CMDID_GET_ATTR DPNI_CMD_V4(0x004)
#define DPNI_CMDID_RESET DPNI_CMD(0x005)
#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006)
@@ -54,7 +56,7 @@
#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216)
#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217)
#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD_V2(0x21A)
-#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B)
+#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V3(0x21B)
#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220)
#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221)
@@ -83,25 +85,25 @@
#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
-#define DPNI_CMDID_SET_TX_PRIORITIES DPNI_CMD_V2(0x250)
+#define DPNI_CMDID_SET_TX_PRIORITIES DPNI_CMD_V3(0x250)
#define DPNI_CMDID_GET_RX_TC_POLICING DPNI_CMD(0x251)
-#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V3(0x25D)
+#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V4(0x25D)
#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E)
-#define DPNI_CMDID_GET_QUEUE DPNI_CMD_V2(0x25F)
-#define DPNI_CMDID_SET_QUEUE DPNI_CMD_V2(0x260)
+#define DPNI_CMDID_GET_QUEUE DPNI_CMD_V3(0x25F)
+#define DPNI_CMDID_SET_QUEUE DPNI_CMD_V3(0x260)
#define DPNI_CMDID_GET_TAILDROP DPNI_CMD_V2(0x261)
-#define DPNI_CMDID_SET_TAILDROP DPNI_CMD_V2(0x262)
+#define DPNI_CMDID_SET_TAILDROP DPNI_CMD_V3(0x262)
#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263)
#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD_V2(0x264)
#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD_V2(0x265)
-#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD_V2(0x267)
-#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD_V2(0x268)
-#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD_V2(0x269)
-#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD_V2(0x26A)
+#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD_V3(0x267)
+#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD_V3(0x268)
+#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD_V3(0x269)
+#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD_V3(0x26A)
#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B)
#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
@@ -136,7 +138,7 @@ struct dpni_cmd_create {
uint8_t num_queues;
uint8_t num_tcs;
uint8_t mac_filter_entries;
- uint8_t pad1;
+ uint8_t num_channels;
uint8_t vlan_filter_entries;
uint8_t pad2;
uint8_t qos_entries;
@@ -230,7 +232,7 @@ struct dpni_rsp_get_attr {
uint8_t num_tx_tcs;
/* response word 1 */
uint8_t vlan_filter_entries;
- uint8_t pad1;
+ uint8_t num_channels;
uint8_t qos_entries;
uint8_t pad2;
uint16_t fs_entries;
@@ -367,6 +369,8 @@ struct dpni_rsp_get_link_state {
#define DPNI_COUPLED_SHIFT 0
#define DPNI_COUPLED_SIZE 1
+#define DPNI_LNI_SHAPER_SHIFT 1
+#define DPNI_LNI_SHAPER_SIZE 1
struct dpni_cmd_set_tx_shaping {
uint16_t tx_cr_max_burst_size;
@@ -374,8 +378,10 @@ struct dpni_cmd_set_tx_shaping {
uint32_t pad;
uint32_t tx_cr_rate_limit;
uint32_t tx_er_rate_limit;
- /* from LSB: coupled:1 */
- uint8_t coupled;
+ /* from LSB: coupled:1, lni_shaper: 1*/
+ uint8_t options;
+ uint8_t channel_id;
+ uint16_t oal;
};
struct dpni_cmd_set_max_frame_length {
@@ -466,7 +472,8 @@ struct dpni_cmd_set_tx_priorities {
uint16_t flags;
uint8_t prio_group_A;
uint8_t prio_group_B;
- uint32_t pad0;
+ uint8_t channel_idx;
+ uint8_t pad0[3];
uint8_t modes[4];
uint32_t pad1;
uint64_t pad2;
@@ -499,6 +506,7 @@ struct dpni_cmd_get_queue {
uint8_t qtype;
uint8_t tc;
uint8_t index;
+ uint8_t channel_id;
};
#define DPNI_DEST_TYPE_SHIFT 0
@@ -551,6 +559,7 @@ struct dpni_cmd_set_queue {
uint64_t user_context;
/* cmd word 4 */
uint8_t cgid;
+ uint8_t channel_id;
};
#define DPNI_DISCARD_ON_MISS_SHIFT 0
@@ -683,7 +692,8 @@ struct dpni_early_drop {
struct dpni_cmd_early_drop {
uint8_t qtype;
uint8_t tc;
- uint8_t pad[6];
+ uint8_t channel_id;
+ uint8_t pad[5];
uint64_t early_drop_iova;
};
@@ -723,7 +733,8 @@ struct dpni_cmd_set_taildrop {
uint8_t qtype;
uint8_t tc;
uint8_t index;
- uint32_t pad0;
+ uint8_t channel_id;
+ uint8_t pad0[3];
/* cmd word 1 */
/* from LSB: enable:1 oal_lo:7 */
uint8_t enable_oal_lo;
@@ -747,7 +758,7 @@ struct dpni_tx_confirmation_mode {
struct dpni_cmd_set_congestion_notification {
uint8_t qtype;
uint8_t tc;
- uint8_t pad;
+ uint8_t channel_id;
uint8_t congestion_point;
uint8_t cgid;
uint8_t pad2[3];
@@ -765,7 +776,7 @@ struct dpni_cmd_set_congestion_notification {
struct dpni_cmd_get_congestion_notification {
uint8_t qtype;
uint8_t tc;
- uint8_t pad;
+ uint8_t channel_id;
uint8_t congestion_point;
uint8_t cgid;
};
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 02/15] bus/fslmc: use dmb oshst for synchronization before I/O
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
2022-01-03 10:01 ` [PATCH v3 01/15] bus/fslmc: update MC to 10.29 nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-01-03 10:01 ` [PATCH v3 03/15] net/dpaa2: warn user in case of high nb desc nipun.gupta
` (13 subsequent siblings)
15 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stephen, Nipun Gupta
From: Nipun Gupta <nipun.gupta@nxp.com>
Outer Shareable Store (oshst) is sufficient for Data Memory
Barrier (dmb) when doing IO on the interface via QBMAN.
This will sync L3/DDR with the L1/L2 cached data.
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/bus/fslmc/qbman/include/compat.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/bus/fslmc/qbman/include/compat.h b/drivers/bus/fslmc/qbman/include/compat.h
index a4471a80af..ece5da5906 100644
--- a/drivers/bus/fslmc/qbman/include/compat.h
+++ b/drivers/bus/fslmc/qbman/include/compat.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2008-2016 Freescale Semiconductor, Inc.
- * Copyright 2017 NXP
+ * Copyright 2017,2021 NXP
*
*/
@@ -81,7 +81,7 @@ do { \
#define __raw_readl(p) (*(const volatile unsigned int *)(p))
#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
-#define dma_wmb() rte_smp_mb()
+#define dma_wmb() rte_io_wmb()
#define atomic_t rte_atomic32_t
#define atomic_read(v) rte_atomic32_read(v)
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 03/15] net/dpaa2: warn user in case of high nb desc
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
2022-01-03 10:01 ` [PATCH v3 01/15] bus/fslmc: update MC to 10.29 nipun.gupta
2022-01-03 10:01 ` [PATCH v3 02/15] bus/fslmc: use dmb oshst for synchronization before I/O nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-01-03 10:01 ` [PATCH v3 04/15] net/dpaa2: fix unregistering interrupt handler nipun.gupta
` (12 subsequent siblings)
15 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stephen, Rohit Raj
From: Rohit Raj <rohit.raj@nxp.com>
Added a warning message that is emitted when the application configures
nb_desc larger than what the PEB memory supports, suggesting that the
user configure the HW descriptors in normal memory rather than in the
faster PEB memory.
Signed-off-by: Rohit Raj <rohit.raj@nxp.com>
---
drivers/net/dpaa2/dpaa2_ethdev.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index a3706439d5..f5cac8f9d9 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -74,6 +74,9 @@ int dpaa2_timestamp_dynfield_offset = -1;
/* Enable error queue */
bool dpaa2_enable_err_queue;
+#define MAX_NB_RX_DESC 11264
+int total_nb_rx_desc;
+
struct rte_dpaa2_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
uint8_t page_id; /* dpni statistics page id */
@@ -694,6 +697,13 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
dev, rx_queue_id, mb_pool, rx_conf);
+ total_nb_rx_desc += nb_rx_desc;
+ if (total_nb_rx_desc > MAX_NB_RX_DESC) {
+ DPAA2_PMD_WARN("\nTotal nb_rx_desc exceeds %d limit. Please use Normal buffers",
+ MAX_NB_RX_DESC);
+ DPAA2_PMD_WARN("To use Normal buffers, run 'export DPNI_NORMAL_BUF=1' before running dynamic_dpl.sh script");
+ }
+
/* Rx deferred start is not supported */
if (rx_conf->rx_deferred_start) {
DPAA2_PMD_ERR("%p:Rx deferred start not supported",
@@ -984,6 +994,9 @@ dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
memset(&cfg, 0, sizeof(struct dpni_queue));
PMD_INIT_FUNC_TRACE();
+
+ total_nb_rx_desc -= dpaa2_q->nb_desc;
+
if (dpaa2_q->cgid != 0xff) {
options = DPNI_QUEUE_OPT_CLEAR_CGID;
cfg.cgid = dpaa2_q->cgid;
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 04/15] net/dpaa2: fix unregistering interrupt handler
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
` (2 preceding siblings ...)
2022-01-03 10:01 ` [PATCH v3 03/15] net/dpaa2: warn user in case of high nb desc nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-01-03 10:01 ` [PATCH v3 05/15] net/dpaa2: fix timestamping for IEEE1588 nipun.gupta
` (11 subsequent siblings)
15 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev
Cc: thomas, ferruh.yigit, hemant.agrawal, stephen, stable, Vanshika Shukla
From: Vanshika Shukla <vanshika.shukla@nxp.com>
This patch fixes the code that unregisters the LSC
interrupt handler in the dpaa2_dev_stop API.
Fixes: c5acbb5ea20e ("net/dpaa2: support link status event")
Cc: stable@dpdk.org
Signed-off-by: Vanshika Shukla <vanshika.shukla@nxp.com>
---
drivers/net/dpaa2/dpaa2_ethdev.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index f5cac8f9d9..18ff07249f 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -1265,7 +1265,12 @@ dpaa2_dev_stop(struct rte_eth_dev *dev)
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
int ret;
struct rte_eth_link link;
- struct rte_intr_handle *intr_handle = dev->intr_handle;
+ struct rte_device *rdev = dev->device;
+ struct rte_intr_handle *intr_handle;
+ struct rte_dpaa2_device *dpaa2_dev;
+
+ dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
+ intr_handle = dpaa2_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 05/15] net/dpaa2: fix timestamping for IEEE1588
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
` (3 preceding siblings ...)
2022-01-03 10:01 ` [PATCH v3 04/15] net/dpaa2: fix unregistering interrupt handler nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-01-03 10:01 ` [PATCH v3 06/15] net/dpaa2: support multiple txqs en-queue for ordered nipun.gupta
` (10 subsequent siblings)
15 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev
Cc: thomas, ferruh.yigit, hemant.agrawal, stephen, stable, Vanshika Shukla
From: Vanshika Shukla <vanshika.shukla@nxp.com>
The current DPAA2 driver implementation records the Rx and Tx
timestamps for PTP without checking whether the packets are
actually PTP packets. For packets that do not have
RTE_MBUF_F_RX_IEEE1588_TMST or RTE_MBUF_F_TX_IEEE1588_TMST
set, the Rx and Tx timestamps should not be recorded.
This patch fixes the issue by checking that the required
flags are set in the mbuf before recording timestamps.
Also this change defines separate values for
DPAA2_TX_CONF_ENABLE and DPAA2_NO_PREFETCH_RX
Fixes: e806bf878c17 ("net/dpaa2: support timestamp")
Cc: stable@dpdk.org
Signed-off-by: Vanshika Shukla <vanshika.shukla@nxp.com>
---
drivers/net/dpaa2/dpaa2_ethdev.h | 2 +-
drivers/net/dpaa2/dpaa2_ptp.c | 8 ++++---
drivers/net/dpaa2/dpaa2_rxtx.c | 39 +++++++++++++++++++++++++-------
3 files changed, 37 insertions(+), 12 deletions(-)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index c5e9267bf0..c21571e63d 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -62,7 +62,7 @@
/* Disable RX tail drop, default is enable */
#define DPAA2_RX_TAILDROP_OFF 0x04
/* Tx confirmation enabled */
-#define DPAA2_TX_CONF_ENABLE 0x08
+#define DPAA2_TX_CONF_ENABLE 0x06
#define DPAA2_RSS_OFFLOAD_ALL ( \
RTE_ETH_RSS_L2_PAYLOAD | \
diff --git a/drivers/net/dpaa2/dpaa2_ptp.c b/drivers/net/dpaa2/dpaa2_ptp.c
index 8d79e39244..3a4536dd69 100644
--- a/drivers/net/dpaa2/dpaa2_ptp.c
+++ b/drivers/net/dpaa2/dpaa2_ptp.c
@@ -111,10 +111,12 @@ int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- if (priv->next_tx_conf_queue)
- dpaa2_dev_tx_conf(priv->next_tx_conf_queue);
- else
+ if (priv->next_tx_conf_queue) {
+ while (!priv->tx_timestamp)
+ dpaa2_dev_tx_conf(priv->next_tx_conf_queue);
+ } else {
return -1;
+ }
*timestamp = rte_ns_to_timespec(priv->tx_timestamp);
return 0;
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index c65589a5f3..ee3ed1b152 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -140,8 +140,10 @@ dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
annotation->word3, annotation->word4);
#if defined(RTE_LIBRTE_IEEE1588)
- if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
+ if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) {
mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
+ }
#endif
if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
@@ -769,7 +771,10 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
else
bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
#if defined(RTE_LIBRTE_IEEE1588)
- priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
+ if (bufs[num_rx]->ol_flags & PKT_RX_IEEE1588_TMST) {
+ priv->rx_timestamp =
+ *dpaa2_timestamp_dynfield(bufs[num_rx]);
+ }
#endif
if (eth_data->dev_conf.rxmode.offloads &
@@ -986,6 +991,13 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
bufs[num_rx] = eth_fd_to_mbuf(fd,
eth_data->port_id);
+#if defined(RTE_LIBRTE_IEEE1588)
+ if (bufs[num_rx]->ol_flags & PKT_RX_IEEE1588_TMST) {
+ priv->rx_timestamp =
+ *dpaa2_timestamp_dynfield(bufs[num_rx]);
+ }
+#endif
+
if (eth_data->dev_conf.rxmode.offloads &
RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
rte_vlan_strip(bufs[num_rx]);
@@ -1021,6 +1033,8 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
struct dpaa2_dev_priv *priv = eth_data->dev_private;
struct dpaa2_annot_hdr *annotation;
+ void *v_addr;
+ struct rte_mbuf *mbuf;
#endif
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
@@ -1105,10 +1119,16 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
num_tx_conf++;
num_pulled++;
#if defined(RTE_LIBRTE_IEEE1588)
- annotation = (struct dpaa2_annot_hdr *)((size_t)
- DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
- DPAA2_FD_PTA_SIZE);
- priv->tx_timestamp = annotation->word2;
+ v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+ mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+
+ if (mbuf->ol_flags & PKT_TX_IEEE1588_TMST) {
+ annotation = (struct dpaa2_annot_hdr *)((size_t)
+ DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
+ DPAA2_FD_PTA_SIZE);
+ priv->tx_timestamp = annotation->word2;
+ }
#endif
} while (pending);
@@ -1184,8 +1204,11 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
* corresponding to last packet transmitted for reading
* the timestamp
*/
- priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
- dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
+ if ((*bufs)->ol_flags & PKT_TX_IEEE1588_TMST) {
+ priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
+ dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
+ priv->tx_timestamp = 0;
+ }
#endif
/*Prepare enqueue descriptor*/
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 06/15] net/dpaa2: support multiple txqs en-queue for ordered
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
` (4 preceding siblings ...)
2022-01-03 10:01 ` [PATCH v3 05/15] net/dpaa2: fix timestamping for IEEE1588 nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-01-03 10:01 ` [PATCH v3 07/15] net/dpaa2: add support for level 2 in traffic management nipun.gupta
` (9 subsequent siblings)
15 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stephen, Jun Yang
From: Jun Yang <jun.yang@nxp.com>
Support Tx enqueue in ordered queue mode, where the queue ID
for each event may differ.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
drivers/event/dpaa2/dpaa2_eventdev.c | 12 ++-
drivers/net/dpaa2/dpaa2_ethdev.h | 4 +
drivers/net/dpaa2/dpaa2_rxtx.c | 142 +++++++++++++++++++++++++++
drivers/net/dpaa2/version.map | 1 +
4 files changed, 155 insertions(+), 4 deletions(-)
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 4d94c315d2..ffc7b8b073 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017,2019 NXP
+ * Copyright 2017,2019-2021 NXP
*/
#include <assert.h>
@@ -1003,16 +1003,20 @@ dpaa2_eventdev_txa_enqueue(void *port,
struct rte_event ev[],
uint16_t nb_events)
{
- struct rte_mbuf *m = (struct rte_mbuf *)ev[0].mbuf;
+ void *txq[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH];
+ struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH];
uint8_t qid, i;
RTE_SET_USED(port);
for (i = 0; i < nb_events; i++) {
- qid = rte_event_eth_tx_adapter_txq_get(m);
- rte_eth_tx_burst(m->port, qid, &m, 1);
+ m[i] = (struct rte_mbuf *)ev[i].mbuf;
+ qid = rte_event_eth_tx_adapter_txq_get(m[i]);
+ txq[i] = rte_eth_devices[m[i]->port].data->tx_queues[qid];
}
+ dpaa2_dev_tx_multi_txq_ordered(txq, m, nb_events);
+
return nb_events;
}
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index c21571e63d..e001a7e49d 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -241,6 +241,10 @@ void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts);
+__rte_internal
+uint16_t dpaa2_dev_tx_multi_txq_ordered(void **queue,
+ struct rte_mbuf **bufs, uint16_t nb_pkts);
+
uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
void dpaa2_flow_clean(struct rte_eth_dev *dev);
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index ee3ed1b152..1096b1cf1d 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -1468,6 +1468,148 @@ dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
}
+uint16_t
+dpaa2_dev_tx_multi_txq_ordered(void **queue,
+ struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ /* Function to transmit the frames to multiple queues respectively.*/
+ uint32_t loop, retry_count;
+ int32_t ret;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t frames_to_send;
+ struct rte_mempool *mp;
+ struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+ struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
+ struct qbman_swp *swp;
+ uint16_t bpid;
+ struct rte_mbuf *mi;
+ struct rte_eth_dev_data *eth_data;
+ struct dpaa2_dev_priv *priv;
+ struct dpaa2_queue *order_sendq;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ for (loop = 0; loop < nb_pkts; loop++) {
+ dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
+ eth_data = dpaa2_q[loop]->eth_data;
+ priv = eth_data->dev_private;
+ qbman_eq_desc_clear(&eqdesc[loop]);
+ if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
+ order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
+ dpaa2_set_enqueue_descriptor(order_sendq,
+ (*bufs),
+ &eqdesc[loop]);
+ } else {
+ qbman_eq_desc_set_no_orp(&eqdesc[loop],
+ DPAA2_EQ_RESP_ERR_FQ);
+ qbman_eq_desc_set_fq(&eqdesc[loop],
+ dpaa2_q[loop]->fqid);
+ }
+
+ retry_count = 0;
+ while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
+ retry_count++;
+ /* Retry for some time before giving up */
+ if (retry_count > CONG_RETRY_COUNT)
+ goto send_frames;
+ }
+
+ if (likely(RTE_MBUF_DIRECT(*bufs))) {
+ mp = (*bufs)->pool;
+ /* Check the basic scenario and set
+ * the FD appropriately here itself.
+ */
+ if (likely(mp && mp->ops_index ==
+ priv->bp_list->dpaa2_ops_index &&
+ (*bufs)->nb_segs == 1 &&
+ rte_mbuf_refcnt_read((*bufs)) == 1)) {
+ if (unlikely((*bufs)->ol_flags
+ & RTE_MBUF_F_TX_VLAN)) {
+ ret = rte_vlan_insert(bufs);
+ if (ret)
+ goto send_frames;
+ }
+ DPAA2_MBUF_TO_CONTIG_FD((*bufs),
+ &fd_arr[loop],
+ mempool_to_bpid(mp));
+ bufs++;
+ dpaa2_q[loop]++;
+ continue;
+ }
+ } else {
+ mi = rte_mbuf_from_indirect(*bufs);
+ mp = mi->pool;
+ }
+ /* Not a hw_pkt pool allocated frame */
+ if (unlikely(!mp || !priv->bp_list)) {
+ DPAA2_PMD_ERR("Err: No buffer pool attached");
+ goto send_frames;
+ }
+
+ if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
+ DPAA2_PMD_WARN("Non DPAA2 buffer pool");
+ /* alloc should be from the default buffer pool
+ * attached to this interface
+ */
+ bpid = priv->bp_list->buf_pool.bpid;
+
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ DPAA2_PMD_ERR(
+ "S/G not supp for non hw offload buffer");
+ goto send_frames;
+ }
+ if (eth_copy_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid)) {
+ goto send_frames;
+ }
+ /* free the original packet */
+ rte_pktmbuf_free(*bufs);
+ } else {
+ bpid = mempool_to_bpid(mp);
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ if (eth_mbuf_to_sg_fd(*bufs,
+ &fd_arr[loop],
+ mp,
+ bpid))
+ goto send_frames;
+ } else {
+ eth_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid);
+ }
+ }
+
+ bufs++;
+ dpaa2_q[loop]++;
+ }
+
+send_frames:
+ frames_to_send = loop;
+ loop = 0;
+ while (loop < frames_to_send) {
+ ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
+ &fd_arr[loop],
+ frames_to_send - loop);
+ if (likely(ret > 0)) {
+ loop += ret;
+ } else {
+ retry_count++;
+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
+ break;
+ }
+ }
+
+ return loop;
+}
+
/* Callback to handle sending ordered packets through WRIOP based interface */
uint16_t
dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
diff --git a/drivers/net/dpaa2/version.map b/drivers/net/dpaa2/version.map
index 2fe61f3442..cc82b8579d 100644
--- a/drivers/net/dpaa2/version.map
+++ b/drivers/net/dpaa2/version.map
@@ -21,6 +21,7 @@ EXPERIMENTAL {
INTERNAL {
global:
+ dpaa2_dev_tx_multi_txq_ordered;
dpaa2_eth_eventq_attach;
dpaa2_eth_eventq_detach;
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 07/15] net/dpaa2: add support for level 2 in traffic management
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
` (5 preceding siblings ...)
2022-01-03 10:01 ` [PATCH v3 06/15] net/dpaa2: support multiple txqs en-queue for ordered nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-01-03 10:01 ` [PATCH v3 08/15] net/dpaa2: secondary process handling for dpni nipun.gupta
` (8 subsequent siblings)
15 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stephen, Gagandeep Singh
From: Gagandeep Singh <g.singh@nxp.com>
This patch adds support for level 2 for QoS shaping.
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
doc/guides/nics/dpaa2.rst | 2 +-
drivers/net/dpaa2/dpaa2_ethdev.c | 55 ++-
drivers/net/dpaa2/dpaa2_ethdev.h | 6 +-
drivers/net/dpaa2/dpaa2_tm.c | 563 ++++++++++++++++++++++------
drivers/net/dpaa2/dpaa2_tm.h | 17 +-
drivers/net/dpaa2/mc/dpni.c | 302 +++++++++------
drivers/net/dpaa2/mc/fsl_dpni.h | 119 +++---
drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 79 ++--
8 files changed, 791 insertions(+), 352 deletions(-)
diff --git a/doc/guides/nics/dpaa2.rst b/doc/guides/nics/dpaa2.rst
index 831bc56488..2d113f53df 100644
--- a/doc/guides/nics/dpaa2.rst
+++ b/doc/guides/nics/dpaa2.rst
@@ -588,7 +588,7 @@ Supported Features
The following capabilities are supported:
-- Level0 (root node) and Level1 are supported.
+- Level0 (root node), Level1 and Level2 are supported.
- 1 private shaper at root node (port level) is supported.
- 8 TX queues per port supported (1 channel per port)
- Both SP and WFQ scheduling mechanisms are supported on all 8 queues.
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 18ff07249f..b91e773605 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -852,6 +852,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
struct dpni_queue tx_conf_cfg;
struct dpni_queue tx_flow_cfg;
uint8_t options = 0, flow_id;
+ uint16_t channel_id;
struct dpni_queue_id qid;
uint32_t tc_id;
int ret;
@@ -877,20 +878,6 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
- tc_id = tx_queue_id;
- flow_id = 0;
-
- ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
- tc_id, flow_id, options, &tx_flow_cfg);
- if (ret) {
- DPAA2_PMD_ERR("Error in setting the tx flow: "
- "tc_id=%d, flow=%d err=%d",
- tc_id, flow_id, ret);
- return -1;
- }
-
- dpaa2_q->flow_id = flow_id;
-
if (tx_queue_id == 0) {
/*Set tx-conf and error configuration*/
if (priv->flags & DPAA2_TX_CONF_ENABLE)
@@ -907,10 +894,26 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
return -1;
}
}
+
+ tc_id = tx_queue_id % priv->num_tx_tc;
+ channel_id = (uint8_t)(tx_queue_id / priv->num_tx_tc) % priv->num_channels;
+ flow_id = 0;
+
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
+ ((channel_id << 8) | tc_id), flow_id, options, &tx_flow_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error in setting the tx flow: "
+ "tc_id=%d, flow=%d err=%d",
+ tc_id, flow_id, ret);
+ return -1;
+ }
+
+ dpaa2_q->flow_id = flow_id;
+
dpaa2_q->tc_index = tc_id;
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX, dpaa2_q->tc_index,
+ DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index),
dpaa2_q->flow_id, &tx_flow_cfg, &qid);
if (ret) {
DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
@@ -942,7 +945,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
priv->token,
DPNI_QUEUE_TX,
- tc_id,
+ ((channel_id << 8) | tc_id),
&cong_notif_cfg);
if (ret) {
DPAA2_PMD_ERR(
@@ -959,7 +962,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
options = options | DPNI_QUEUE_OPT_USER_CTX;
tx_conf_cfg.user_context = (size_t)(dpaa2_q);
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+ DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
if (ret) {
DPAA2_PMD_ERR("Error in setting the tx conf flow: "
@@ -970,7 +973,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
}
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+ DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
if (ret) {
DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
@@ -1152,7 +1155,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
struct dpni_queue cfg;
struct dpni_error_cfg err_cfg;
- uint16_t qdid;
struct dpni_queue_id qid;
struct dpaa2_queue *dpaa2_q;
int ret, i;
@@ -1162,7 +1164,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
intr_handle = dpaa2_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
-
ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
@@ -1173,14 +1174,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
/* Power up the phy. Needed to make the link go UP */
dpaa2_dev_set_link_up(dev);
- ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
- DPNI_QUEUE_TX, &qdid);
- if (ret) {
- DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
- return ret;
- }
- priv->qdid = qdid;
-
for (i = 0; i < data->nb_rx_queues; i++) {
dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
@@ -2619,9 +2612,12 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
}
priv->num_rx_tc = attr.num_rx_tcs;
+ priv->num_tx_tc = attr.num_tx_tcs;
priv->qos_entries = attr.qos_entries;
priv->fs_entries = attr.fs_entries;
priv->dist_queues = attr.num_queues;
+ priv->num_channels = attr.num_channels;
+ priv->channel_inuse = 0;
/* only if the custom CG is enabled */
if (attr.options & DPNI_OPT_CUSTOM_CG)
@@ -2635,8 +2631,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
for (i = 0; i < attr.num_rx_tcs; i++)
priv->nb_rx_queues += attr.num_queues;
- /* Using number of TX queues as number of TX TCs */
- priv->nb_tx_queues = attr.num_tx_tcs;
+ priv->nb_tx_queues = attr.num_tx_tcs * attr.num_channels;
DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
priv->num_rx_tc, priv->nb_rx_queues,
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index e001a7e49d..1fc2fc367e 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -25,6 +25,7 @@
#define MAX_RX_QUEUES 128
#define MAX_TX_QUEUES 16
#define MAX_DPNI 8
+#define DPAA2_MAX_CHANNELS 16
#define DPAA2_RX_DEFAULT_NBDESC 512
@@ -160,15 +161,17 @@ struct dpaa2_dev_priv {
void *rx_vq[MAX_RX_QUEUES];
void *tx_vq[MAX_TX_QUEUES];
struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
- void *tx_conf_vq[MAX_TX_QUEUES];
+ void *tx_conf_vq[MAX_TX_QUEUES * DPAA2_MAX_CHANNELS];
void *rx_err_vq;
uint8_t flags; /*dpaa2 config flags */
uint8_t max_mac_filters;
uint8_t max_vlan_filters;
uint8_t num_rx_tc;
+ uint8_t num_tx_tc;
uint16_t qos_entries;
uint16_t fs_entries;
uint8_t dist_queues;
+ uint8_t num_channels;
uint8_t en_ordered;
uint8_t en_loose_ordered;
uint8_t max_cgs;
@@ -190,6 +193,7 @@ struct dpaa2_dev_priv {
struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
+ uint8_t channel_inuse;
LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
LIST_HEAD(nodes, dpaa2_tm_node) nodes;
LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles;
diff --git a/drivers/net/dpaa2/dpaa2_tm.c b/drivers/net/dpaa2/dpaa2_tm.c
index f5faaedfb4..8fe5bfa013 100644
--- a/drivers/net/dpaa2/dpaa2_tm.c
+++ b/drivers/net/dpaa2/dpaa2_tm.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2020 NXP
+ * Copyright 2020-2021 NXP
*/
#include <rte_ethdev.h>
@@ -7,12 +7,16 @@
#include <rte_tm_driver.h>
#include "dpaa2_ethdev.h"
+#include "dpaa2_pmd_logs.h"
+#include <dpaa2_hw_dpio.h>
#define DPAA2_BURST_MAX (64 * 1024)
#define DPAA2_SHAPER_MIN_RATE 0
#define DPAA2_SHAPER_MAX_RATE 107374182400ull
#define DPAA2_WEIGHT_MAX 24701
+#define DPAA2_PKT_ADJUST_LEN_MIN 0
+#define DPAA2_PKT_ADJUST_LEN_MAX 0x7ff
int
dpaa2_tm_init(struct rte_eth_dev *dev)
@@ -66,6 +70,8 @@ dpaa2_capabilities_get(struct rte_eth_dev *dev,
struct rte_tm_capabilities *cap,
struct rte_tm_error *error)
{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
if (!cap)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED,
@@ -73,27 +79,31 @@ dpaa2_capabilities_get(struct rte_eth_dev *dev,
memset(cap, 0, sizeof(*cap));
- /* root node(port) + txqs number, assuming each TX
+ /* root node(port) + channels + txqs number, assuming each TX
* Queue is mapped to each TC
*/
- cap->n_nodes_max = 1 + dev->data->nb_tx_queues;
- cap->n_levels_max = 2; /* port level + txqs level */
+ cap->n_nodes_max = 1 + priv->num_channels + dev->data->nb_tx_queues;
+ cap->n_levels_max = MAX_LEVEL;
cap->non_leaf_nodes_identical = 1;
cap->leaf_nodes_identical = 1;
- cap->shaper_n_max = 1;
- cap->shaper_private_n_max = 1;
- cap->shaper_private_dual_rate_n_max = 1;
+ cap->shaper_n_max = 1 + priv->num_channels; /* LNI + channels */
+ cap->shaper_private_n_max = 1 + priv->num_channels;
+ cap->shaper_private_dual_rate_n_max = 1 + priv->num_channels;
cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
+ cap->shaper_pkt_length_adjust_min = DPAA2_PKT_ADJUST_LEN_MIN;
+ cap->shaper_pkt_length_adjust_max = DPAA2_PKT_ADJUST_LEN_MAX;
- cap->sched_n_children_max = dev->data->nb_tx_queues;
- cap->sched_sp_n_priorities_max = dev->data->nb_tx_queues;
- cap->sched_wfq_n_children_per_group_max = dev->data->nb_tx_queues;
- cap->sched_wfq_n_groups_max = 2;
- cap->sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
+ if (priv->num_channels > DPNI_MAX_TC)
+ cap->sched_n_children_max = priv->num_channels;
+ else
+ cap->sched_n_children_max = DPNI_MAX_TC;
- cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_STATS;
+ cap->sched_sp_n_priorities_max = DPNI_MAX_TC;
+ cap->sched_wfq_n_children_per_group_max = DPNI_MAX_TC;
+ cap->sched_wfq_n_groups_max = 2;
+ cap->sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
return 0;
@@ -105,6 +115,8 @@ dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
struct rte_tm_level_capabilities *cap,
struct rte_tm_error *error)
{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
if (!cap)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED,
@@ -112,12 +124,12 @@ dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
memset(cap, 0, sizeof(*cap));
- if (level_id > 1)
+ if (level_id > QUEUE_LEVEL)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_LEVEL_ID,
NULL, "Wrong level id\n");
- if (level_id == 0) { /* Root node */
+ if (level_id == LNI_LEVEL) { /* Root node (LNI) */
cap->n_nodes_max = 1;
cap->n_nodes_nonleaf_max = 1;
cap->non_leaf_nodes_identical = 1;
@@ -127,20 +139,39 @@ dpaa2_level_capabilities_get(struct rte_eth_dev *dev,
cap->nonleaf.shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
cap->nonleaf.shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
- cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
+ cap->nonleaf.sched_n_children_max = priv->num_channels; /* no. of channels */
cap->nonleaf.sched_sp_n_priorities_max = 1;
- cap->nonleaf.sched_wfq_n_children_per_group_max =
- dev->data->nb_tx_queues;
- cap->nonleaf.sched_wfq_n_groups_max = 2;
- cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = 1;
cap->nonleaf.stats_mask = RTE_TM_STATS_N_PKTS |
RTE_TM_STATS_N_BYTES;
+ } else if (level_id == CHANNEL_LEVEL) { /* channels */
+ cap->n_nodes_max = priv->num_channels;
+ cap->n_nodes_nonleaf_max = priv->num_channels;
+ cap->n_nodes_leaf_max = 0;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = 1;
+ cap->nonleaf.shaper_private_dual_rate_supported = 1;
+ cap->nonleaf.shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
+ cap->nonleaf.shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
+
+ /* no. of class queues per channel */
+ cap->nonleaf.sched_n_children_max = priv->num_tx_tc;
+ cap->nonleaf.sched_sp_n_priorities_max = priv->num_tx_tc;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = priv->num_tx_tc;
+ cap->nonleaf.sched_wfq_n_groups_max = 2;
+ cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
} else { /* leaf nodes */
- cap->n_nodes_max = dev->data->nb_tx_queues;
- cap->n_nodes_leaf_max = dev->data->nb_tx_queues;
+ /* queues per channels * channel */
+ cap->n_nodes_max = priv->num_tx_tc * priv->num_channels;
+ cap->n_nodes_leaf_max = priv->num_tx_tc * priv->num_channels;
cap->leaf_nodes_identical = 1;
- cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS;
+ cap->leaf.shaper_private_supported = 0;
+ cap->leaf.stats_mask = RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
}
return 0;
@@ -167,18 +198,33 @@ dpaa2_node_capabilities_get(struct rte_eth_dev *dev, uint32_t node_id,
RTE_TM_ERROR_TYPE_NODE_ID,
NULL, "Node id does not exist\n");
- if (node->type == 0) {
+ if (node->level_id == LNI_LEVEL) {
cap->shaper_private_supported = 1;
+ cap->shaper_private_dual_rate_supported = 1;
+ cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
+ cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
- cap->nonleaf.sched_n_children_max = dev->data->nb_tx_queues;
+ cap->nonleaf.sched_n_children_max = priv->num_channels;
cap->nonleaf.sched_sp_n_priorities_max = 1;
- cap->nonleaf.sched_wfq_n_children_per_group_max =
- dev->data->nb_tx_queues;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
+ } else if (node->level_id == CHANNEL_LEVEL) {
+ cap->shaper_private_supported = 1;
+ cap->shaper_private_dual_rate_supported = 1;
+ cap->shaper_private_rate_min = DPAA2_SHAPER_MIN_RATE;
+ cap->shaper_private_rate_max = DPAA2_SHAPER_MAX_RATE;
+
+ cap->nonleaf.sched_n_children_max = priv->num_tx_tc;
+ cap->nonleaf.sched_sp_n_priorities_max = priv->num_tx_tc;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = priv->num_tx_tc;
cap->nonleaf.sched_wfq_n_groups_max = 2;
- cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX;
- cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+ cap->nonleaf.sched_wfq_weight_max = DPAA2_WEIGHT_MAX / 100;
} else {
- cap->stats_mask = RTE_TM_STATS_N_PKTS;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
}
return 0;
@@ -202,7 +248,7 @@ dpaa2_node_type_get(struct rte_eth_dev *dev, uint32_t node_id, int *is_leaf,
RTE_TM_ERROR_TYPE_NODE_ID,
NULL, "Node id does not exist\n");
- *is_leaf = node->type == 1/*NODE_QUEUE*/ ? 1 : 0;
+ *is_leaf = node->type == LEAF_NODE ? 1 : 0;
return 0;
}
@@ -257,6 +303,13 @@ dpaa2_shaper_profile_add(struct rte_eth_dev *dev, uint32_t shaper_profile_id,
RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
NULL, "Wrong shaper profile id\n");
+ if (params->pkt_length_adjust > DPAA2_PKT_ADJUST_LEN_MAX ||
+ params->pkt_length_adjust < DPAA2_PKT_ADJUST_LEN_MIN)
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_CAPABILITIES,
+ NULL,
+ "Not supported pkt adjust length\n");
+
profile = dpaa2_shaper_profile_from_id(priv, shaper_profile_id);
if (profile)
return -rte_tm_error_set(error, EEXIST,
@@ -318,7 +371,7 @@ dpaa2_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
RTE_TM_ERROR_TYPE_NODE_WEIGHT,
NULL, "Weight is out of range\n");
- if (level_id != 0 && level_id != 1)
+ if (level_id > QUEUE_LEVEL)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_LEVEL_ID,
NULL, "Wrong level id\n");
@@ -338,39 +391,38 @@ dpaa2_node_check_params(struct rte_eth_dev *dev, uint32_t node_id,
RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
NULL, "Shared shaper is not supported\n");
- /* verify port (root node) settings */
+ /* verify non leaf nodes settings */
if (node_id >= dev->data->nb_tx_queues) {
if (params->nonleaf.wfq_weight_mode)
return -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
NULL, "WFQ weight mode is not supported\n");
-
- if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
- RTE_TM_STATS_N_BYTES))
+ } else {
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
return -rte_tm_error_set(error, EINVAL,
- RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
- NULL,
- "Requested port stats are not supported\n");
-
- return 0;
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL, "Private shaper not supported on leaf\n");
}
- if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
- return -rte_tm_error_set(error, EINVAL,
- RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
- NULL, "Private shaper not supported on leaf\n");
-
- if (params->stats_mask & ~RTE_TM_STATS_N_PKTS)
- return -rte_tm_error_set(error, EINVAL,
- RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
- NULL,
- "Requested stats are not supported\n");
/* check leaf node */
- if (level_id == 1) {
+ if (level_id == QUEUE_LEVEL) {
if (params->leaf.cman != RTE_TM_CMAN_TAIL_DROP)
return -rte_tm_error_set(error, ENODEV,
RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
NULL, "Only taildrop is supported\n");
+ if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES))
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ "Requested port stats are not supported\n");
+ } else if (level_id == LNI_LEVEL) {
+ if (params->stats_mask & ~(RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES))
+ return -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ "Requested port stats are not supported\n");
}
return 0;
@@ -407,7 +459,7 @@ dpaa2_node_add(struct rte_eth_dev *dev, uint32_t node_id,
}
if (parent_node_id == RTE_TM_NODE_ID_NULL) {
LIST_FOREACH(node, &priv->nodes, next) {
- if (node->type != 0 /*root node*/)
+ if (node->level_id != LNI_LEVEL)
continue;
return -rte_tm_error_set(error, EINVAL,
@@ -435,14 +487,29 @@ dpaa2_node_add(struct rte_eth_dev *dev, uint32_t node_id,
NULL, NULL);
node->id = node_id;
- node->type = parent_node_id == RTE_TM_NODE_ID_NULL ? 0/*NODE_PORT*/ :
- 1/*NODE_QUEUE*/;
+
+ if (node_id > dev->data->nb_tx_queues)
+ node->type = NON_LEAF_NODE;
+ else
+ node->type = LEAF_NODE;
+
+ node->level_id = level_id;
+ if (node->level_id == CHANNEL_LEVEL) {
+ if (priv->channel_inuse < priv->num_channels) {
+ node->channel_id = priv->channel_inuse;
+ priv->channel_inuse++;
+ } else {
+ printf("error no channel id available\n");
+ }
+ }
if (parent) {
node->parent = parent;
parent->refcnt++;
}
+ /* TODO: add check if refcnt is more than supported children */
+
if (profile) {
node->profile = profile;
profile->refcnt++;
@@ -464,6 +531,7 @@ dpaa2_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct dpaa2_tm_node *node;
+ /* XXX: update it */
if (0) {
return -rte_tm_error_set(error, EPERM,
RTE_TM_ERROR_TYPE_UNSPECIFIED,
@@ -493,119 +561,326 @@ dpaa2_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
return 0;
}
+static int
+dpaa2_tm_configure_queue(struct rte_eth_dev *dev, struct dpaa2_tm_node *node)
+{
+ int ret = 0;
+ uint32_t tc_id;
+ uint8_t flow_id, options = 0;
+ struct dpni_queue tx_flow_cfg;
+ struct dpni_queue_id qid;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_queue *dpaa2_q;
+
+ memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
+ dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id];
+ tc_id = node->parent->tc_id;
+ node->parent->tc_id++;
+ flow_id = 0;
+
+ if (dpaa2_q == NULL) {
+ printf("Queue is not configured for node = %d\n", node->id);
+ return -1;
+ }
+
+ DPAA2_PMD_DEBUG("tc_id = %d, channel = %d\n\n", tc_id,
+ node->parent->channel_id);
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
+ ((node->parent->channel_id << 8) | tc_id),
+ flow_id, options, &tx_flow_cfg);
+ if (ret) {
+ printf("Error in setting the tx flow: "
+ "channel id = %d tc_id= %d, param = 0x%x "
+ "flow=%d err=%d\n", node->parent->channel_id, tc_id,
+ ((node->parent->channel_id << 8) | tc_id), flow_id,
+ ret);
+ return -1;
+ }
+
+ dpaa2_q->flow_id = flow_id;
+ dpaa2_q->tc_index = tc_id;
+
+ ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX, ((node->parent->channel_id << 8) | dpaa2_q->tc_index),
+ dpaa2_q->flow_id, &tx_flow_cfg, &qid);
+ if (ret) {
+ printf("Error in getting LFQID err=%d", ret);
+ return -1;
+ }
+ dpaa2_q->fqid = qid.fqid;
+
+ /* setting congestion notification */
+ if (!(priv->flags & DPAA2_TX_CGR_OFF)) {
+ struct dpni_congestion_notification_cfg cong_notif_cfg = {0};
+
+ cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
+ cong_notif_cfg.threshold_entry = dpaa2_q->nb_desc;
+ /* Notify that the queue is not congested when the data in
+ * the queue is below this threshold. (90% of value)
+ */
+ cong_notif_cfg.threshold_exit = (dpaa2_q->nb_desc * 9) / 10;
+ cong_notif_cfg.message_ctx = 0;
+ cong_notif_cfg.message_iova =
+ (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
+ cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
+ cong_notif_cfg.notification_mode =
+ DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
+ DPNI_CONG_OPT_WRITE_MEM_ON_EXIT |
+ DPNI_CONG_OPT_COHERENT_WRITE;
+ cong_notif_cfg.cg_point = DPNI_CP_QUEUE;
+
+ ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
+ priv->token,
+ DPNI_QUEUE_TX,
+ ((node->parent->channel_id << 8) | tc_id),
+ &cong_notif_cfg);
+ if (ret) {
+ printf("Error in setting tx congestion notification: "
+ "err=%d", ret);
+ return -ret;
+ }
+ }
+
+ return 0;
+}
+
+static void
+dpaa2_tm_sort_and_configure(struct rte_eth_dev *dev,
+ struct dpaa2_tm_node **nodes, int n)
+{
+ struct dpaa2_tm_node *temp_node;
+ int i;
+
+ if (n == 1) {
+ DPAA2_PMD_DEBUG("node id = %d\n, priority = %d, index = %d\n",
+ nodes[n - 1]->id, nodes[n - 1]->priority,
+ n - 1);
+ dpaa2_tm_configure_queue(dev, nodes[n - 1]);
+ return;
+ }
+
+ for (i = 0; i < n - 1; i++) {
+ if (nodes[i]->priority > nodes[i + 1]->priority) {
+ temp_node = nodes[i];
+ nodes[i] = nodes[i + 1];
+ nodes[i + 1] = temp_node;
+ }
+ }
+ dpaa2_tm_sort_and_configure(dev, nodes, n - 1);
+
+ DPAA2_PMD_DEBUG("node id = %d\n, priority = %d, index = %d\n",
+ nodes[n - 1]->id, nodes[n - 1]->priority,
+ n - 1);
+ dpaa2_tm_configure_queue(dev, nodes[n - 1]);
+}
+
static int
dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
struct rte_tm_error *error)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct dpaa2_tm_node *node, *temp_node;
+ struct dpaa2_tm_node *node;
+ struct dpaa2_tm_node *leaf_node, *temp_leaf_node, *channel_node;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
- int ret;
- int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
- struct dpni_tx_priorities_cfg prio_cfg;
+ int ret, t;
+
+ /* Populate TCs */
+ LIST_FOREACH(channel_node, &priv->nodes, next) {
+ struct dpaa2_tm_node *nodes[DPNI_MAX_TC];
+ int i = 0;
- memset(&prio_cfg, 0, sizeof(prio_cfg));
- memset(conf, 0, sizeof(conf));
+ if (channel_node->level_id != CHANNEL_LEVEL)
+ continue;
+
+ LIST_FOREACH(leaf_node, &priv->nodes, next) {
+ if (leaf_node->level_id == LNI_LEVEL ||
+ leaf_node->level_id == CHANNEL_LEVEL)
+ continue;
+ if (leaf_node->parent == channel_node) {
+ if (i >= DPNI_MAX_TC) {
+ ret = -rte_tm_error_set(error, EINVAL,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "More children than supported\n");
+ goto out;
+ }
+ nodes[i++] = leaf_node;
+ }
+ }
+ if (i > 0) {
+ DPAA2_PMD_DEBUG("Configure queues\n");
+ dpaa2_tm_sort_and_configure(dev, nodes, i);
+ }
+ }
+
+ /* Shaping */
LIST_FOREACH(node, &priv->nodes, next) {
- if (node->type == 0/*root node*/) {
+ if (node->type == NON_LEAF_NODE) {
if (!node->profile)
continue;
-
struct dpni_tx_shaping_cfg tx_cr_shaper, tx_er_shaper;
+ uint32_t param = 0;
tx_cr_shaper.max_burst_size =
node->profile->params.committed.size;
tx_cr_shaper.rate_limit =
- node->profile->params.committed.rate / (1024 * 1024);
+ node->profile->params.committed.rate /
+ (1024 * 1024);
tx_er_shaper.max_burst_size =
node->profile->params.peak.size;
tx_er_shaper.rate_limit =
node->profile->params.peak.rate / (1024 * 1024);
+ /* root node */
+ if (node->parent == NULL) {
+ DPAA2_PMD_DEBUG("LNI S.rate = %u, burst =%u\n",
+ tx_cr_shaper.rate_limit,
+ tx_cr_shaper.max_burst_size);
+ param = 0x2;
+ param |= node->profile->params.pkt_length_adjust << 16;
+ } else {
+ DPAA2_PMD_DEBUG("Channel = %d S.rate = %u\n",
+ node->channel_id,
+ tx_cr_shaper.rate_limit);
+ param = (node->channel_id << 8);
+ }
ret = dpni_set_tx_shaping(dpni, 0, priv->token,
- &tx_cr_shaper, &tx_er_shaper, 0);
+ &tx_cr_shaper, &tx_er_shaper, param);
if (ret) {
ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_SHAPER_PROFILE, NULL,
"Error in setting Shaping\n");
goto out;
}
+ continue;
+ }
+ }
+ LIST_FOREACH(channel_node, &priv->nodes, next) {
+ int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC];
+ struct dpni_tx_priorities_cfg prio_cfg;
+
+ memset(&prio_cfg, 0, sizeof(prio_cfg));
+ memset(conf, 0, sizeof(conf));
+
+ /* Process for each channel */
+ if (channel_node->level_id != CHANNEL_LEVEL)
continue;
- } else { /* level 1, all leaf nodes */
- if (node->id >= dev->data->nb_tx_queues) {
+
+ LIST_FOREACH(leaf_node, &priv->nodes, next) {
+ struct dpaa2_queue *leaf_dpaa2_q;
+ uint8_t leaf_tc_id;
+
+ if (leaf_node->level_id == LNI_LEVEL ||
+ leaf_node->level_id == CHANNEL_LEVEL)
+ continue;
+
+ /* level 2, all leaf nodes */
+ if (leaf_node->id >= dev->data->nb_tx_queues) {
ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID, NULL,
"Not enough txqs configured\n");
goto out;
}
- if (conf[node->id])
+ if (conf[leaf_node->id])
+ continue;
+
+ if (leaf_node->parent != channel_node)
continue;
- LIST_FOREACH(temp_node, &priv->nodes, next) {
- if (temp_node->id == node->id ||
- temp_node->type == 0)
+ leaf_dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[leaf_node->id];
+ leaf_tc_id = leaf_dpaa2_q->tc_index;
+ /* Process sibling leaf nodes */
+ LIST_FOREACH(temp_leaf_node, &priv->nodes, next) {
+ if (temp_leaf_node->id == leaf_node->id ||
+ temp_leaf_node->level_id == LNI_LEVEL ||
+ temp_leaf_node->level_id == CHANNEL_LEVEL)
+ continue;
+
+ if (temp_leaf_node->parent != channel_node)
continue;
- if (conf[temp_node->id])
+
+ if (conf[temp_leaf_node->id])
continue;
- if (node->priority == temp_node->priority) {
+
+ if (leaf_node->priority == temp_leaf_node->priority) {
+ struct dpaa2_queue *temp_leaf_dpaa2_q;
+ uint8_t temp_leaf_tc_id;
+
+ temp_leaf_dpaa2_q = (struct dpaa2_queue *)
+ dev->data->tx_queues[temp_leaf_node->id];
+ temp_leaf_tc_id = temp_leaf_dpaa2_q->tc_index;
if (wfq_grp == 0) {
- prio_cfg.tc_sched[temp_node->id].mode =
- DPNI_TX_SCHED_WEIGHTED_A;
- /* DPDK support lowest weight 1
- * and DPAA2 platform 100
- */
- prio_cfg.tc_sched[temp_node->id].delta_bandwidth =
- temp_node->weight + 99;
+ prio_cfg.tc_sched[temp_leaf_tc_id].mode =
+ DPNI_TX_SCHED_WEIGHTED_A;
+ /* DPAA2 supports weights in multiples of 100 */
+ prio_cfg.tc_sched[temp_leaf_tc_id].delta_bandwidth =
+ temp_leaf_node->weight * 100;
} else if (wfq_grp == 1) {
- prio_cfg.tc_sched[temp_node->id].mode =
- DPNI_TX_SCHED_WEIGHTED_B;
- prio_cfg.tc_sched[temp_node->id].delta_bandwidth =
- temp_node->weight + 99;
+ prio_cfg.tc_sched[temp_leaf_tc_id].mode =
+ DPNI_TX_SCHED_WEIGHTED_B;
+ prio_cfg.tc_sched[temp_leaf_tc_id].delta_bandwidth =
+ temp_leaf_node->weight * 100;
} else {
- /*TODO: add one more check for
- * number of nodes in a group
- */
ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
"Only 2 WFQ Groups are supported\n");
goto out;
}
- conf[temp_node->id] = 1;
is_wfq_grp = 1;
+ conf[temp_leaf_node->id] = 1;
}
}
if (is_wfq_grp) {
if (wfq_grp == 0) {
- prio_cfg.tc_sched[node->id].mode =
- DPNI_TX_SCHED_WEIGHTED_A;
- prio_cfg.tc_sched[node->id].delta_bandwidth =
- node->weight + 99;
- prio_cfg.prio_group_A = node->priority;
+ prio_cfg.tc_sched[leaf_tc_id].mode =
+ DPNI_TX_SCHED_WEIGHTED_A;
+ prio_cfg.tc_sched[leaf_tc_id].delta_bandwidth =
+ leaf_node->weight * 100;
+ prio_cfg.prio_group_A = leaf_node->priority;
} else if (wfq_grp == 1) {
- prio_cfg.tc_sched[node->id].mode =
- DPNI_TX_SCHED_WEIGHTED_B;
- prio_cfg.tc_sched[node->id].delta_bandwidth =
- node->weight + 99;
- prio_cfg.prio_group_B = node->priority;
+ prio_cfg.tc_sched[leaf_tc_id].mode =
+ DPNI_TX_SCHED_WEIGHTED_B;
+ prio_cfg.tc_sched[leaf_tc_id].delta_bandwidth =
+ leaf_node->weight * 100;
+ prio_cfg.prio_group_B = leaf_node->priority;
}
wfq_grp++;
is_wfq_grp = 0;
}
- conf[node->id] = 1;
+ conf[leaf_node->id] = 1;
}
- if (wfq_grp)
+ if (wfq_grp > 1) {
prio_cfg.separate_groups = 1;
- }
- ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
- if (ret) {
- ret = -rte_tm_error_set(error, EINVAL,
+ if (prio_cfg.prio_group_B < prio_cfg.prio_group_A) {
+ prio_cfg.prio_group_A = 0;
+ prio_cfg.prio_group_B = 1;
+ } else {
+ prio_cfg.prio_group_A = 1;
+ prio_cfg.prio_group_B = 0;
+ }
+ }
+
+ prio_cfg.prio_group_A = 1;
+ prio_cfg.channel_idx = channel_node->channel_id;
+ ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg);
+ if (ret) {
+ ret = -rte_tm_error_set(error, EINVAL,
RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
"Scheduling Failed\n");
- goto out;
+ goto out;
+ }
+ DPAA2_PMD_DEBUG("########################################\n");
+ DPAA2_PMD_DEBUG("Channel idx = %d\n", prio_cfg.channel_idx);
+ for (t = 0; t < DPNI_MAX_TC; t++) {
+ DPAA2_PMD_DEBUG("tc = %d mode = %d ", t, prio_cfg.tc_sched[t].mode);
+ DPAA2_PMD_DEBUG("delta = %d\n", prio_cfg.tc_sched[t].delta_bandwidth);
+ }
+ DPAA2_PMD_DEBUG("prioritya = %d\n", prio_cfg.prio_group_A);
+ DPAA2_PMD_DEBUG("priorityb = %d\n", prio_cfg.prio_group_B);
+ DPAA2_PMD_DEBUG("separate grps = %d\n\n", prio_cfg.separate_groups);
}
-
return 0;
out:
@@ -617,6 +892,81 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail,
return ret;
}
+static int
+dpaa2_node_stats_read(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_node_stats *stats, uint64_t *stats_mask,
+ int clear, struct rte_tm_error *error)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_tm_node *node;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+ union dpni_statistics value;
+ int ret = 0;
+
+ node = dpaa2_node_from_id(priv, node_id);
+ if (!node)
+ return -rte_tm_error_set(error, ENODEV,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL, "Node id does not exist\n");
+
+ if (stats_mask)
+ *stats_mask = node->stats_mask;
+
+ if (!stats)
+ return 0;
+
+ memset(stats, 0, sizeof(*stats));
+ memset(&value, 0, sizeof(union dpni_statistics));
+
+ if (node->level_id == LNI_LEVEL) {
+ uint8_t page1 = 1;
+
+ ret = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ page1, 0, &value);
+ if (ret)
+ return -rte_tm_error_set(error, -ret,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to read port statistics\n");
+
+ if (node->stats_mask & RTE_TM_STATS_N_PKTS)
+ stats->n_pkts = value.page_1.egress_all_frames;
+
+ if (node->stats_mask & RTE_TM_STATS_N_BYTES)
+ stats->n_bytes = value.page_1.egress_all_bytes;
+
+ if (clear) {
+ ret = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
+ return -rte_tm_error_set(error, -ret,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to reset port statistics\n");
+ }
+ } else if (node->level_id == QUEUE_LEVEL) {
+ uint8_t page3 = 3;
+ struct dpaa2_queue *dpaa2_q;
+ dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[node->id];
+
+ ret = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ page3,
+ (node->parent->channel_id << 8 |
+ dpaa2_q->tc_index), &value);
+ if (ret)
+ return -rte_tm_error_set(error, -ret,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to read queue statistics\n");
+
+ if (node->stats_mask & RTE_TM_STATS_N_PKTS)
+ stats->n_pkts = value.page_3.ceetm_dequeue_frames;
+ if (node->stats_mask & RTE_TM_STATS_N_BYTES)
+ stats->n_bytes = value.page_3.ceetm_dequeue_bytes;
+ } else {
+ return -rte_tm_error_set(error, -1,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to read channel statistics\n");
+ }
+
+ return 0;
+}
+
const struct rte_tm_ops dpaa2_tm_ops = {
.node_type_get = dpaa2_node_type_get,
.capabilities_get = dpaa2_capabilities_get,
@@ -627,4 +977,5 @@ const struct rte_tm_ops dpaa2_tm_ops = {
.node_add = dpaa2_node_add,
.node_delete = dpaa2_node_delete,
.hierarchy_commit = dpaa2_hierarchy_commit,
+ .node_stats_read = dpaa2_node_stats_read,
};
diff --git a/drivers/net/dpaa2/dpaa2_tm.h b/drivers/net/dpaa2/dpaa2_tm.h
index 6632fab687..cfbb437322 100644
--- a/drivers/net/dpaa2/dpaa2_tm.h
+++ b/drivers/net/dpaa2/dpaa2_tm.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2020 NXP
+ * Copyright 2020-2021 NXP
*/
#ifndef _DPAA2_TM_H_
@@ -7,6 +7,18 @@
#include <rte_tm.h>
+enum node_type {
+ NON_LEAF_NODE = 0,
+ LEAF_NODE
+};
+
+enum level_type {
+ LNI_LEVEL = 0,
+ CHANNEL_LEVEL,
+ QUEUE_LEVEL,
+ MAX_LEVEL
+};
+
struct dpaa2_tm_shaper_profile {
LIST_ENTRY(dpaa2_tm_shaper_profile) next;
uint32_t id;
@@ -18,6 +30,9 @@ struct dpaa2_tm_node {
LIST_ENTRY(dpaa2_tm_node) next;
uint32_t id;
uint32_t type;
+ uint32_t level_id;
+ uint16_t channel_id; /* Only for level 1 nodes */
+ uint16_t tc_id; /* Only for level 1 nodes */
int refcnt;
struct dpaa2_tm_node *parent;
struct dpaa2_tm_shaper_profile *profile;
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index cf78295d90..b7a65cb637 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -916,6 +916,44 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_link_cfg() - return the link configuration configured by
+ * dpni_set_link_cfg().
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Link configuration from dpni object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_link_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpni_cmd_set_link_cfg *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_CFG,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
+ cfg->advertising = le64_to_cpu(rsp_params->advertising);
+ cfg->options = le64_to_cpu(rsp_params->options);
+ cfg->rate = le32_to_cpu(rsp_params->rate);
+
+ return err;
+}
+
/**
* dpni_get_link_state() - Return the link state (either up or down)
* @mc_io: Pointer to MC portal's I/O object
@@ -1678,6 +1716,38 @@ int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpni_get_tx_confirmation_mode() - Get Tx confirmation mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mode: Tx confirmation mode
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_confirmation_mode *mode)
+{
+ struct dpni_tx_confirmation_mode *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONFIRMATION_MODE,
+ cmd_flags,
+ token);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpni_tx_confirmation_mode *)cmd.params;
+ *mode = rsp_params->confirmation_mode;
+
+ return 0;
+}
+
/**
* dpni_set_qos_table() - Set QoS mapping table
* @mc_io: Pointer to MC portal's I/O object
@@ -2733,6 +2803,122 @@ int dpni_get_opr(struct fsl_mc_io *mc_io,
return 0;
}
+int dpni_load_sw_sequence(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_load_ss_cfg *cfg)
+{
+ struct dpni_load_sw_sequence *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_LOAD_SW_SEQUENCE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_load_sw_sequence *)cmd.params;
+ cmd_params->dest = cfg->dest;
+ cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
+ cmd_params->ss_size = cpu_to_le16(cfg->ss_size);
+ cmd_params->ss_iova = cpu_to_le64(cfg->ss_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_enable_sw_sequence(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_enable_ss_cfg *cfg)
+{
+ struct dpni_enable_sw_sequence *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_SW_SEQUENCE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_enable_sw_sequence *)cmd.params;
+ cmd_params->dest = cfg->dest;
+ cmd_params->set_start = cfg->set_start;
+ cmd_params->hxs = cpu_to_le16(cfg->hxs);
+ cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
+ cmd_params->param_offset = cfg->param_offset;
+ cmd_params->param_size = cfg->param_size;
+ cmd_params->param_iova = cpu_to_le64(cfg->param_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_get_sw_sequence_layout() - Get the soft sequence layout
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @src: Source of the layout (WRIOP Rx or Tx)
+ * @ss_layout_iova: I/O virtual address of 264 bytes DMA-able memory
+ *
+ * warning: After calling this function, call dpni_extract_sw_sequence_layout()
+ * to get the layout.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_get_sw_sequence_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_soft_sequence_dest src,
+ uint64_t ss_layout_iova)
+{
+ struct dpni_get_sw_sequence_layout *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT,
+ cmd_flags,
+ token);
+
+ cmd_params = (struct dpni_get_sw_sequence_layout *)cmd.params;
+ cmd_params->src = src;
+ cmd_params->layout_iova = cpu_to_le64(ss_layout_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_extract_sw_sequence_layout() - extract the software sequence layout
+ * @layout: software sequence layout
+ * @sw_sequence_layout_buf: Zeroed 264 bytes of memory before mapping it
+ * to DMA
+ *
+ * This function has to be called after dpni_get_sw_sequence_layout
+ *
+ */
+void dpni_extract_sw_sequence_layout(struct dpni_sw_sequence_layout *layout,
+ const uint8_t *sw_sequence_layout_buf)
+{
+ const struct dpni_sw_sequence_layout_entry *ext_params;
+ int i;
+ uint16_t ss_size, ss_offset;
+
+ ext_params = (const struct dpni_sw_sequence_layout_entry *)
+ sw_sequence_layout_buf;
+
+ for (i = 0; i < DPNI_SW_SEQUENCE_LAYOUT_SIZE; i++) {
+ ss_offset = le16_to_cpu(ext_params[i].ss_offset);
+ ss_size = le16_to_cpu(ext_params[i].ss_size);
+
+ if (ss_offset == 0 && ss_size == 0) {
+ layout->num_ss = i;
+ return;
+ }
+
+ layout->ss[i].ss_offset = ss_offset;
+ layout->ss[i].ss_size = ss_size;
+ layout->ss[i].param_offset = ext_params[i].param_offset;
+ layout->ss[i].param_size = ext_params[i].param_size;
+ }
+}
/**
* dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution
* @mc_io: Pointer to MC portal's I/O object
@@ -2901,119 +3087,3 @@ int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
return err;
}
-int dpni_load_sw_sequence(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpni_load_ss_cfg *cfg)
-{
- struct dpni_load_sw_sequence *cmd_params;
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPNI_CMDID_LOAD_SW_SEQUENCE,
- cmd_flags,
- token);
- cmd_params = (struct dpni_load_sw_sequence *)cmd.params;
- cmd_params->dest = cfg->dest;
- cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
- cmd_params->ss_size = cpu_to_le16(cfg->ss_size);
- cmd_params->ss_iova = cpu_to_le64(cfg->ss_iova);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-int dpni_enable_sw_sequence(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpni_enable_ss_cfg *cfg)
-{
- struct dpni_enable_sw_sequence *cmd_params;
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_SW_SEQUENCE,
- cmd_flags,
- token);
- cmd_params = (struct dpni_enable_sw_sequence *)cmd.params;
- cmd_params->dest = cfg->dest;
- cmd_params->set_start = cfg->set_start;
- cmd_params->hxs = cpu_to_le16(cfg->hxs);
- cmd_params->ss_offset = cpu_to_le16(cfg->ss_offset);
- cmd_params->param_offset = cfg->param_offset;
- cmd_params->param_size = cfg->param_size;
- cmd_params->param_iova = cpu_to_le64(cfg->param_iova);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpni_get_sw_sequence_layout() - Get the soft sequence layout
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPNI object
- * @src: Source of the layout (WRIOP Rx or Tx)
- * @ss_layout_iova: I/O virtual address of 264 bytes DMA-able memory
- *
- * warning: After calling this function, call dpni_extract_sw_sequence_layout()
- * to get the layout.
- *
- * Return: '0' on Success; error code otherwise.
- */
-int dpni_get_sw_sequence_layout(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- enum dpni_soft_sequence_dest src,
- uint64_t ss_layout_iova)
-{
- struct dpni_get_sw_sequence_layout *cmd_params;
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT,
- cmd_flags,
- token);
-
- cmd_params = (struct dpni_get_sw_sequence_layout *)cmd.params;
- cmd_params->src = src;
- cmd_params->layout_iova = cpu_to_le64(ss_layout_iova);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
- * dpni_extract_sw_sequence_layout() - extract the software sequence layout
- * @layout: software sequence layout
- * @sw_sequence_layout_buf: Zeroed 264 bytes of memory before mapping it
- * to DMA
- *
- * This function has to be called after dpni_get_sw_sequence_layout
- *
- */
-void dpni_extract_sw_sequence_layout(struct dpni_sw_sequence_layout *layout,
- const uint8_t *sw_sequence_layout_buf)
-{
- const struct dpni_sw_sequence_layout_entry *ext_params;
- int i;
- uint16_t ss_size, ss_offset;
-
- ext_params = (const struct dpni_sw_sequence_layout_entry *)
- sw_sequence_layout_buf;
-
- for (i = 0; i < DPNI_SW_SEQUENCE_LAYOUT_SIZE; i++) {
- ss_offset = le16_to_cpu(ext_params[i].ss_offset);
- ss_size = le16_to_cpu(ext_params[i].ss_size);
-
- if (ss_offset == 0 && ss_size == 0) {
- layout->num_ss = i;
- return;
- }
-
- layout->ss[i].ss_offset = ss_offset;
- layout->ss[i].ss_size = ss_size;
- layout->ss[i].param_offset = ext_params[i].param_offset;
- layout->ss[i].param_size = ext_params[i].param_size;
- }
-}
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
index 8aead28261..c7df727fef 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -761,6 +761,11 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
uint16_t token,
const struct dpni_link_cfg *cfg);
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_link_cfg *cfg);
+
/**
* struct dpni_link_state - Structure representing DPNI link state
* @rate: Rate
@@ -1709,63 +1714,6 @@ int dpni_get_opr(struct fsl_mc_io *mc_io,
uint8_t flags,
uint8_t opr_id);
-/**
- * When used for queue_idx in function dpni_set_rx_dist_default_queue will
- * signal to dpni to drop all unclassified frames
- */
-#define DPNI_FS_MISS_DROP ((uint16_t)-1)
-
-/**
- * struct dpni_rx_dist_cfg - distribution configuration
- * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
- * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
- * 512,768,896,1024
- * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
- * the extractions to be used for the distribution key by calling
- * dpkg_prepare_key_cfg() relevant only when enable!=0 otherwise
- * it can be '0'
- * @enable: enable/disable the distribution.
- * @tc: TC id for which distribution is set
- * @fs_miss_flow_id: when packet misses all rules from flow steering table and
- * hash is disabled it will be put into this queue id; use
- * DPNI_FS_MISS_DROP to drop frames. The value of this field is
- * used only when flow steering distribution is enabled and hash
- * distribution is disabled
- */
-struct dpni_rx_dist_cfg {
- uint16_t dist_size;
- uint64_t key_cfg_iova;
- uint8_t enable;
- uint8_t tc;
- uint16_t fs_miss_flow_id;
-};
-
-int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
- uint16_t token, const struct dpni_rx_dist_cfg *cfg);
-
-int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
- uint16_t token, const struct dpni_rx_dist_cfg *cfg);
-
-int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
- uint16_t token, uint16_t tpid);
-
-int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
- uint16_t token, uint16_t tpid);
-
-/**
- * struct dpni_custom_tpid_cfg - custom TPID configuration. Contains custom TPID
- * values used in current dpni object to detect 802.1q frames.
- * @tpid1: first tag. Not used if zero.
- * @tpid2: second tag. Not used if zero.
- */
-struct dpni_custom_tpid_cfg {
- uint16_t tpid1;
- uint16_t tpid2;
-};
-
-int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
- uint16_t token, struct dpni_custom_tpid_cfg *tpid);
-
/**
* enum dpni_soft_sequence_dest - Enumeration of WRIOP software sequence
* destinations
@@ -1936,4 +1884,61 @@ int dpni_set_port_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
int dpni_get_port_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
uint16_t token, struct dpni_port_cfg *port_cfg);
+/**
+ * When used for queue_idx in function dpni_set_rx_dist_default_queue will
+ * signal to dpni to drop all unclassified frames
+ */
+#define DPNI_FS_MISS_DROP ((uint16_t)-1)
+
+/**
+ * struct dpni_rx_dist_cfg - distribution configuration
+ * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8,
+ * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448,
+ * 512,768,896,1024
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * the extractions to be used for the distribution key by calling
+ * dpkg_prepare_key_cfg() relevant only when enable!=0 otherwise
+ * it can be '0'
+ * @enable: enable/disable the distribution.
+ * @tc: TC id for which distribution is set
+ * @fs_miss_flow_id: when packet misses all rules from flow steering table and
+ * hash is disabled it will be put into this queue id; use
+ * DPNI_FS_MISS_DROP to drop frames. The value of this field is
+ * used only when flow steering distribution is enabled and hash
+ * distribution is disabled
+ */
+struct dpni_rx_dist_cfg {
+ uint16_t dist_size;
+ uint64_t key_cfg_iova;
+ uint8_t enable;
+ uint8_t tc;
+ uint16_t fs_miss_flow_id;
+};
+
+int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, const struct dpni_rx_dist_cfg *cfg);
+
+int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, uint16_t tpid);
+
+int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, uint16_t tpid);
+
+/**
+ * struct dpni_custom_tpid_cfg - custom TPID configuration. Contains custom TPID
+ * values used in current dpni object to detect 802.1q frames.
+ * @tpid1: first tag. Not used if zero.
+ * @tpid2: second tag. Not used if zero.
+ */
+struct dpni_custom_tpid_cfg {
+ uint16_t tpid1;
+ uint16_t tpid2;
+};
+
+int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, struct dpni_custom_tpid_cfg *tpid);
+
#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index 8bff2ec9af..ed0bd7615a 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -108,16 +108,17 @@
#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C)
#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266)
#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE DPNI_CMD(0x26D)
+#define DPNI_CMDID_SET_OPR DPNI_CMD(0x26e)
+#define DPNI_CMDID_GET_OPR DPNI_CMD(0x26f)
#define DPNI_CMDID_LOAD_SW_SEQUENCE DPNI_CMD(0x270)
#define DPNI_CMDID_ENABLE_SW_SEQUENCE DPNI_CMD(0x271)
#define DPNI_CMDID_GET_SW_SEQUENCE_LAYOUT DPNI_CMD(0x272)
-#define DPNI_CMDID_SET_OPR DPNI_CMD(0x26e)
-#define DPNI_CMDID_GET_OPR DPNI_CMD(0x26f)
#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
#define DPNI_CMDID_ADD_CUSTOM_TPID DPNI_CMD(0x275)
#define DPNI_CMDID_REMOVE_CUSTOM_TPID DPNI_CMD(0x276)
#define DPNI_CMDID_GET_CUSTOM_TPID DPNI_CMD(0x277)
+#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
@@ -451,8 +452,6 @@ struct dpni_cmd_enable_vlan_filter {
uint8_t en;
};
-#define DPNI_VLAN_SET_QUEUE_ACTION 1
-
struct dpni_cmd_vlan_id {
uint8_t flags;
uint8_t tc_id;
@@ -854,42 +853,6 @@ struct dpni_rsp_get_opr {
uint16_t opr_id;
};
-struct dpni_cmd_add_custom_tpid {
- uint16_t pad;
- uint16_t tpid;
-};
-
-struct dpni_cmd_remove_custom_tpid {
- uint16_t pad;
- uint16_t tpid;
-};
-
-struct dpni_rsp_get_custom_tpid {
- uint16_t tpid1;
- uint16_t tpid2;
-};
-
-#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
-#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
-struct dpni_cmd_set_rx_fs_dist {
- uint16_t dist_size;
- uint8_t enable;
- uint8_t tc;
- uint16_t miss_flow_id;
- uint16_t pad1;
- uint64_t key_cfg_iova;
-};
-
-#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
-#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
-struct dpni_cmd_set_rx_hash_dist {
- uint16_t dist_size;
- uint8_t enable;
- uint8_t tc_id;
- uint32_t pad;
- uint64_t key_cfg_iova;
-};
-
struct dpni_load_sw_sequence {
uint8_t dest;
uint8_t pad0[7];
@@ -957,5 +920,41 @@ struct dpni_rsp_get_port_cfg {
uint32_t bit_params;
};
+#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_FS_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_fs_dist {
+ uint16_t dist_size;
+ uint8_t enable;
+ uint8_t tc;
+ uint16_t miss_flow_id;
+ uint16_t pad1;
+ uint64_t key_cfg_iova;
+};
+
+#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0
+#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1
+struct dpni_cmd_set_rx_hash_dist {
+ uint16_t dist_size;
+ uint8_t enable;
+ uint8_t tc_id;
+ uint32_t pad;
+ uint64_t key_cfg_iova;
+};
+
+struct dpni_cmd_add_custom_tpid {
+ uint16_t pad;
+ uint16_t tpid;
+};
+
+struct dpni_cmd_remove_custom_tpid {
+ uint16_t pad;
+ uint16_t tpid;
+};
+
+struct dpni_rsp_get_custom_tpid {
+ uint16_t tpid1;
+ uint16_t tpid2;
+};
+
#pragma pack(pop)
#endif /* _FSL_DPNI_CMD_H */
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 08/15] net/dpaa2: secondary process handling for dpni
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
` (6 preceding siblings ...)
2022-01-03 10:01 ` [PATCH v3 07/15] net/dpaa2: add support for level 2 in traffic management nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-01-03 10:01 ` [PATCH v3 09/15] bus/fslmc: add and scan dprc devices nipun.gupta
` (7 subsequent siblings)
15 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stephen, Jun Yang
From: Jun Yang <jun.yang@nxp.com>
This change uses 'dev->process_private' instead of 'priv->hw'
to get dpmcp per process while setting flow distribution,
as priv->hw is valid only for the primary process.
It also initializes rte_dpaa2_bpid_info in the secondary process.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 23 +++++++++++++++++++++++
drivers/mempool/dpaa2/rte_dpaa2_mempool.h | 15 +++++++++++++++
drivers/mempool/dpaa2/version.map | 1 +
drivers/net/dpaa2/base/dpaa2_hw_dpni.c | 5 ++---
drivers/net/dpaa2/dpaa2_ethdev.c | 10 ++++++++--
drivers/net/dpaa2/dpaa2_ethdev.h | 3 ++-
6 files changed, 51 insertions(+), 6 deletions(-)
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 39c6252a63..56c629c681 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -263,6 +263,29 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
}
}
+int rte_dpaa2_bpid_info_init(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_info *bp_info = mempool_to_bpinfo(mp);
+ uint32_t bpid = bp_info->bpid;
+
+ if (!rte_dpaa2_bpid_info) {
+ rte_dpaa2_bpid_info = (struct dpaa2_bp_info *)rte_malloc(NULL,
+ sizeof(struct dpaa2_bp_info) * MAX_BPID,
+ RTE_CACHE_LINE_SIZE);
+ if (rte_dpaa2_bpid_info == NULL)
+ return -ENOMEM;
+ memset(rte_dpaa2_bpid_info, 0,
+ sizeof(struct dpaa2_bp_info) * MAX_BPID);
+ }
+
+ rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
+ + rte_pktmbuf_priv_size(mp);
+ rte_dpaa2_bpid_info[bpid].bp_list = bp_info->bp_list;
+ rte_dpaa2_bpid_info[bpid].bpid = bpid;
+
+ return 0;
+}
+
uint16_t
rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
{
diff --git a/drivers/mempool/dpaa2/rte_dpaa2_mempool.h b/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
index 4a22b7c42e..28dea74326 100644
--- a/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
+++ b/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
@@ -46,6 +46,21 @@ rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp);
struct rte_mbuf *
rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr);
+/**
+ * Initialize the rte_dpaa2_bpid_info
+ * In general, it is called in the secondary process and
+ * mp has been created in the primary process.
+ *
+ * @param mp
+ * memory pool
+ *
+ * @return
+ * - 0 on success.
+ * - (<0) on failure.
+ */
+__rte_internal
+int rte_dpaa2_bpid_info_init(struct rte_mempool *mp);
+
#ifdef __cplusplus
}
#endif
diff --git a/drivers/mempool/dpaa2/version.map b/drivers/mempool/dpaa2/version.map
index 49c460ec54..cfd4ae617a 100644
--- a/drivers/mempool/dpaa2/version.map
+++ b/drivers/mempool/dpaa2/version.map
@@ -11,5 +11,6 @@ INTERNAL {
global:
rte_dpaa2_bpid_info;
+ rte_dpaa2_bpid_info_init;
rte_dpaa2_mbuf_alloc_bulk;
};
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index 3170694841..9509f6e8a3 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -95,7 +95,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
uint64_t req_dist_set, int tc_index)
{
struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
- struct fsl_mc_io *dpni = priv->hw;
+ struct fsl_mc_io *dpni = eth_dev->process_private;
struct dpni_rx_dist_cfg tc_cfg;
struct dpkg_profile_cfg kg_cfg;
void *p_params;
@@ -457,13 +457,12 @@ dpaa2_distset_to_dpkg_profile_cfg(
int
dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
- void *blist)
+ struct fsl_mc_io *dpni, void *blist)
{
/* Function to attach a DPNI with a buffer pool list. Buffer pool list
* handle is passed in blist.
*/
int32_t retcode;
- struct fsl_mc_io *dpni = priv->hw;
struct dpni_pools_cfg bpool_cfg;
struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
struct dpni_buffer_layout layout;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index b91e773605..a45beed75f 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -18,6 +18,7 @@
#include <rte_dev.h>
#include <rte_fslmc.h>
#include <rte_flow_driver.h>
+#include "rte_dpaa2_mempool.h"
#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
@@ -712,9 +713,14 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ ret = rte_dpaa2_bpid_info_init(mb_pool);
+ if (ret)
+ return ret;
+ }
bpid = mempool_to_bpid(mb_pool);
- ret = dpaa2_attach_bp_list(priv,
- rte_dpaa2_bpid_info[bpid].bp_list);
+ ret = dpaa2_attach_bp_list(priv, dpni,
+ rte_dpaa2_bpid_info[bpid].bp_list);
if (ret)
return ret;
}
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 1fc2fc367e..bd33a22a8e 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -208,7 +208,8 @@ int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
uint8_t tc_index);
-int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);
+int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
+ struct fsl_mc_io *dpni, void *blist);
__rte_internal
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 09/15] bus/fslmc: add and scan dprc devices
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
` (7 preceding siblings ...)
2022-01-03 10:01 ` [PATCH v3 08/15] net/dpaa2: secondary process handling for dpni nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-01-03 10:01 ` [PATCH v3 10/15] net/dpaa2: support recycle loopback port nipun.gupta
` (6 subsequent siblings)
15 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stephen, Jun Yang, Nipun Gupta
From: Jun Yang <jun.yang@nxp.com>
In order to get the connection endpoint of each object,
scan the dprc object.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/bus/fslmc/fslmc_bus.c | 15 ++-
drivers/bus/fslmc/fslmc_vfio.c | 18 +++-
drivers/bus/fslmc/mc/dprc.c | 129 +++++++++++++++++++++++
drivers/bus/fslmc/mc/fsl_dprc.h | 46 ++++++++
drivers/bus/fslmc/mc/fsl_dprc_cmd.h | 48 +++++++++
drivers/bus/fslmc/meson.build | 4 +-
drivers/bus/fslmc/portal/dpaa2_hw_dprc.c | 100 ++++++++++++++++++
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 12 +++
drivers/bus/fslmc/rte_fslmc.h | 10 +-
9 files changed, 374 insertions(+), 8 deletions(-)
create mode 100644 drivers/bus/fslmc/mc/dprc.c
create mode 100644 drivers/bus/fslmc/mc/fsl_dprc.h
create mode 100644 drivers/bus/fslmc/mc/fsl_dprc_cmd.h
create mode 100644 drivers/bus/fslmc/portal/dpaa2_hw_dprc.c
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index a0ef24cdc8..a3c0d838c4 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2016,2018-2019 NXP
+ * Copyright 2016,2018-2021 NXP
*
*/
@@ -136,10 +136,6 @@ scan_one_fslmc_device(char *dev_name)
if (!dev_name)
return ret;
- /* Ignore the Container name itself */
- if (!strncmp("dprc", dev_name, 4))
- return 0;
-
/* Creating a temporary copy to perform cut-parse over string */
dup_dev_name = strdup(dev_name);
if (!dup_dev_name) {
@@ -197,6 +193,8 @@ scan_one_fslmc_device(char *dev_name)
dev->dev_type = DPAA2_MUX;
else if (!strncmp("dprtc", t_ptr, 5))
dev->dev_type = DPAA2_DPRTC;
+ else if (!strncmp("dprc", t_ptr, 4))
+ dev->dev_type = DPAA2_DPRC;
else
dev->dev_type = DPAA2_UNKNOWN;
@@ -339,6 +337,13 @@ rte_fslmc_scan(void)
goto scan_fail;
}
+ /* Scan the DPRC container object */
+ ret = scan_one_fslmc_device(fslmc_container);
+ if (ret != 0) {
+ /* Error in parsing directory - exit gracefully */
+ goto scan_fail_cleanup;
+ }
+
while ((entry = readdir(dir)) != NULL) {
if (entry->d_name[0] == '.' || entry->d_type != DT_DIR)
continue;
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index b4704eeae4..1b89a56bbc 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2021 NXP
*
*/
@@ -728,6 +728,7 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev)
case DPAA2_BPOOL:
case DPAA2_DPRTC:
case DPAA2_MUX:
+ case DPAA2_DPRC:
TAILQ_FOREACH(object, &dpaa2_obj_list, next) {
if (dev->dev_type == object->dev_type)
object->create(dev_fd, &device_info,
@@ -881,6 +882,21 @@ fslmc_vfio_process_group(void)
return -1;
}
+ /* Search for DPRC device next as it updates endpoint of
+ * other devices.
+ */
+ current_device = 0;
+ RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
+ if (dev->dev_type == DPAA2_DPRC) {
+ ret = fslmc_process_iodevices(dev);
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to process dprc");
+ return -1;
+ }
+ TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
+ }
+ }
+
current_device = 0;
RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next,
dev_temp) {
diff --git a/drivers/bus/fslmc/mc/dprc.c b/drivers/bus/fslmc/mc/dprc.c
new file mode 100644
index 0000000000..491081c7c8
--- /dev/null
+++ b/drivers/bus/fslmc/mc/dprc.c
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2021 NXP
+ *
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dprc.h>
+#include <fsl_dprc_cmd.h>
+
+/** @addtogroup dprc
+ * @{
+ */
+
+/**
+ * dprc_open() - Open DPRC object for use
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @container_id: Container ID to open
+ * @token: Returned token of DPRC object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Required before any operation on the object.
+ */
+int dprc_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int container_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
+ 0);
+ cmd_params = (struct dprc_cmd_open *)cmd.params;
+ cmd_params->container_id = cpu_to_le32(container_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dprc_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dprc_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dprc_get_connection() - Get connected endpoint and link status if connection
+ * exists.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPRC object
+ * @endpoint1: Endpoint 1 configuration parameters
+ * @endpoint2: Returned endpoint 2 configuration parameters
+ * @state: Returned link state:
+ * 1 - link is up;
+ * 0 - link is down;
+ * -1 - no connection (endpoint2 information is irrelevant)
+ *
+ * Return: '0' on Success; -ENAVAIL if connection does not exist.
+ */
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state)
+{
+ struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_connection *cmd_params;
+ struct dprc_rsp_get_connection *rsp_params;
+ int err, i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+ cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+ cmd_params->ep1_interface_id = cpu_to_le16(endpoint1->if_id);
+ for (i = 0; i < 16; i++)
+ cmd_params->ep1_type[i] = endpoint1->type[i];
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+ endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+ endpoint2->if_id = le16_to_cpu(rsp_params->ep2_interface_id);
+ *state = le32_to_cpu(rsp_params->state);
+ for (i = 0; i < 16; i++)
+ endpoint2->type[i] = rsp_params->ep2_type[i];
+
+ return 0;
+}
diff --git a/drivers/bus/fslmc/mc/fsl_dprc.h b/drivers/bus/fslmc/mc/fsl_dprc.h
new file mode 100644
index 0000000000..177210c2d4
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dprc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2021 NXP
+ *
+ */
+#ifndef _FSL_DPRC_H
+#define _FSL_DPRC_H
+
+/** @addtogroup dprc Data Path Resource Container API
+ * Contains DPRC API for managing and querying DPAA resources
+ * @{
+ */
+
+struct fsl_mc_io;
+
+int dprc_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int container_id,
+ uint16_t *token);
+
+int dprc_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dprc_endpoint - Endpoint description for link connect/disconnect
+ * operations
+ * @type: Endpoint object type: NULL terminated string
+ * @id: Endpoint object ID
+ * @if_id: Interface ID; should be set for endpoints with multiple
+ * interfaces ("dpsw", "dpdmux"); for others, always set to 0
+ */
+struct dprc_endpoint {
+ char type[16];
+ int id;
+ uint16_t if_id;
+};
+
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dprc_endpoint *endpoint1,
+ struct dprc_endpoint *endpoint2,
+ int *state);
+#endif /* _FSL_DPRC_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dprc_cmd.h b/drivers/bus/fslmc/mc/fsl_dprc_cmd.h
new file mode 100644
index 0000000000..6efa5634d2
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dprc_cmd.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016-2021 NXP
+ *
+ */
+
+#ifndef _FSL_DPRC_CMD_H
+#define _FSL_DPRC_CMD_H
+
+/* Minimal supported DPRC Version */
+#define DPRC_VER_MAJOR 6
+#define DPRC_VER_MINOR 6
+
+/* Command versioning */
+#define DPRC_CMD_BASE_VERSION 1
+#define DPRC_CMD_ID_OFFSET 4
+
+#define DPRC_CMD(id) ((id << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
+#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
+#define DPRC_CMDID_GET_CONNECTION DPRC_CMD(0x16C)
+
+#pragma pack(push, 1)
+struct dprc_cmd_open {
+ uint32_t container_id;
+};
+
+struct dprc_cmd_get_connection {
+ uint32_t ep1_id;
+ uint16_t ep1_interface_id;
+ uint16_t pad;
+
+ uint8_t ep1_type[16];
+};
+
+struct dprc_rsp_get_connection {
+ uint64_t pad[3];
+ uint32_t ep2_id;
+ uint16_t ep2_interface_id;
+ uint16_t pad1;
+ uint8_t ep2_type[16];
+ uint32_t state;
+};
+#pragma pack(pop)
+#endif /* _FSL_DPRC_CMD_H */
diff --git a/drivers/bus/fslmc/meson.build b/drivers/bus/fslmc/meson.build
index 54be76f516..162ca286fe 100644
--- a/drivers/bus/fslmc/meson.build
+++ b/drivers/bus/fslmc/meson.build
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2018 NXP
+# Copyright 2018,2021 NXP
if not is_linux
build = false
@@ -16,10 +16,12 @@ sources = files(
'mc/dpdmai.c',
'mc/dpio.c',
'mc/dpmng.c',
+ 'mc/dprc.c',
'mc/mc_sys.c',
'portal/dpaa2_hw_dpbp.c',
'portal/dpaa2_hw_dpci.c',
'portal/dpaa2_hw_dpio.c',
+ 'portal/dpaa2_hw_dprc.c',
'qbman/qbman_portal.c',
'qbman/qbman_debug.c',
)
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dprc.c b/drivers/bus/fslmc/portal/dpaa2_hw_dprc.c
new file mode 100644
index 0000000000..ca1d0304d5
--- /dev/null
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dprc.c
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2021 NXP
+ *
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+#include <rte_dev.h>
+
+#include <fslmc_logs.h>
+#include <rte_fslmc.h>
+#include <mc/fsl_dprc.h>
+#include "portal/dpaa2_hw_pvt.h"
+
+TAILQ_HEAD(dprc_dev_list, dpaa2_dprc_dev);
+static struct dprc_dev_list dprc_dev_list
+ = TAILQ_HEAD_INITIALIZER(dprc_dev_list); /*!< DPRC device list */
+
+static int
+rte_dpaa2_create_dprc_device(int vdev_fd __rte_unused,
+ struct vfio_device_info *obj_info __rte_unused,
+ int dprc_id)
+{
+ struct dpaa2_dprc_dev *dprc_node;
+ struct dprc_endpoint endpoint1, endpoint2;
+ struct rte_dpaa2_device *dev, *dev_tmp;
+ int ret;
+
+ /* Allocate DPAA2 dprc handle */
+ dprc_node = rte_malloc(NULL, sizeof(struct dpaa2_dprc_dev), 0);
+ if (!dprc_node) {
+ DPAA2_BUS_ERR("Memory allocation failed for DPRC Device");
+ return -ENOMEM;
+ }
+
+ /* Open the dprc object */
+ dprc_node->dprc.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+ dprc_node->dprc_id = dprc_id;
+ ret = dprc_open(&dprc_node->dprc,
+ CMD_PRI_LOW, dprc_id, &dprc_node->token);
+ if (ret) {
+ DPAA2_BUS_ERR("Resource alloc failure with err code: %d", ret);
+ rte_free(dprc_node);
+ return ret;
+ }
+
+ RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_tmp) {
+ if (dev->dev_type == DPAA2_ETH) {
+ int link_state;
+
+ memset(&endpoint1, 0, sizeof(struct dprc_endpoint));
+ memset(&endpoint2, 0, sizeof(struct dprc_endpoint));
+ strcpy(endpoint1.type, "dpni");
+ endpoint1.id = dev->object_id;
+ ret = dprc_get_connection(&dprc_node->dprc,
+ CMD_PRI_LOW,
+ dprc_node->token,
+ &endpoint1, &endpoint2,
+ &link_state);
+ if (ret) {
+ DPAA2_BUS_ERR("dpni.%d connection failed!",
+ dev->object_id);
+ dprc_close(&dprc_node->dprc, CMD_PRI_LOW,
+ dprc_node->token);
+ rte_free(dprc_node);
+ return ret;
+ }
+
+ if (!strcmp(endpoint2.type, "dpmac"))
+ dev->ep_dev_type = DPAA2_MAC;
+ else if (!strcmp(endpoint2.type, "dpni"))
+ dev->ep_dev_type = DPAA2_ETH;
+ else if (!strcmp(endpoint2.type, "dpdmux"))
+ dev->ep_dev_type = DPAA2_MUX;
+ else
+ dev->ep_dev_type = DPAA2_UNKNOWN;
+
+ dev->ep_object_id = endpoint2.id;
+ } else {
+ dev->ep_dev_type = DPAA2_UNKNOWN;
+ }
+ sprintf(dev->ep_name, "%s.%d", endpoint2.type, endpoint2.id);
+ }
+
+ TAILQ_INSERT_TAIL(&dprc_dev_list, dprc_node, next);
+
+ return 0;
+}
+
+static struct rte_dpaa2_object rte_dpaa2_dprc_obj = {
+ .dev_type = DPAA2_DPRC,
+ .create = rte_dpaa2_create_dprc_device,
+};
+
+RTE_PMD_REGISTER_DPAA2_OBJECT(dprc, rte_dpaa2_dprc_obj);
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index b1bba1ac36..8cb4d404aa 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -187,6 +187,18 @@ struct swp_active_dqs {
extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
+/**
+ * A structure describing a DPAA2 container.
+ */
+struct dpaa2_dprc_dev {
+ TAILQ_ENTRY(dpaa2_dprc_dev) next;
+ /**< Pointer to Next device instance */
+ const char *name;
+ struct fsl_mc_io dprc; /** handle to DPRC portal object */
+ uint16_t token;
+ uint32_t dprc_id; /*HW ID for DPRC object */
+};
+
struct dpaa2_dpci_dev {
TAILQ_ENTRY(dpaa2_dpci_dev) next;
/**< Pointer to Next device instance */
diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h
index 729f360646..12b586b13b 100644
--- a/drivers/bus/fslmc/rte_fslmc.h
+++ b/drivers/bus/fslmc/rte_fslmc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2016,2019 NXP
+ * Copyright 2016,2021 NXP
*
*/
@@ -37,6 +37,9 @@ extern "C" {
#include <fslmc_vfio.h>
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
#define FSLMC_OBJECT_MAX_LEN 32 /**< Length of each device on bus */
#define DPAA2_INVALID_MBUF_SEQN 0
@@ -88,6 +91,8 @@ enum rte_dpaa2_dev_type {
DPAA2_QDMA, /**< DPDMAI type device */
DPAA2_MUX, /**< DPDMUX type device */
DPAA2_DPRTC, /**< DPRTC type device */
+ DPAA2_DPRC, /**< DPRC type device */
+ DPAA2_MAC, /**< DPMAC type device */
/* Unknown device placeholder */
DPAA2_UNKNOWN,
DPAA2_DEVTYPE_MAX,
@@ -122,6 +127,9 @@ struct rte_dpaa2_device {
};
enum rte_dpaa2_dev_type dev_type; /**< Device Type */
uint16_t object_id; /**< DPAA2 Object ID */
+ enum rte_dpaa2_dev_type ep_dev_type; /**< Endpoint Device Type */
+ uint16_t ep_object_id; /**< Endpoint DPAA2 Object ID */
+ char ep_name[RTE_DEV_NAME_MAX_LEN];
struct rte_intr_handle *intr_handle; /**< Interrupt handle */
struct rte_dpaa2_driver *driver; /**< Associated driver */
char name[FSLMC_OBJECT_MAX_LEN]; /**< DPAA2 Object name*/
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 10/15] net/dpaa2: support recycle loopback port
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
` (8 preceding siblings ...)
2022-01-03 10:01 ` [PATCH v3 09/15] bus/fslmc: add and scan dprc devices nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-02-01 9:27 ` David Marchand
2022-01-03 10:01 ` [PATCH v3 11/15] net/dpaa: check status before configuring shared MAC nipun.gupta
` (5 subsequent siblings)
15 siblings, 1 reply; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stephen, Jun Yang
From: Jun Yang <jun.yang@nxp.com>
The DPAA2 recycle port is used for configuring the device
in loopback mode. Loopback configuration can be done at
the dpni level or at the serdes level.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 3 +-
drivers/net/dpaa2/dpaa2_ethdev.c | 32 +-
drivers/net/dpaa2/dpaa2_ethdev.h | 23 +
drivers/net/dpaa2/dpaa2_recycle.c | 780 ++++++++++++++++++++++++
drivers/net/dpaa2/mc/dpni.c | 32 +
drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 1 +
drivers/net/dpaa2/meson.build | 1 +
7 files changed, 870 insertions(+), 2 deletions(-)
create mode 100644 drivers/net/dpaa2/dpaa2_recycle.c
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 8cb4d404aa..4d0f7e4b5d 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2021 NXP
*
*/
@@ -176,6 +176,7 @@ struct dpaa2_queue {
uint16_t nb_desc;
uint16_t resv;
uint64_t offloads;
+ uint64_t lpbk_cntx;
} __rte_cache_aligned;
struct swp_active_dqs {
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index a45beed75f..d81f8cb07a 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -668,6 +668,30 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
+ if (eth_conf->lpbk_mode) {
+ ret = dpaa2_dev_recycle_config(dev);
+ if (ret) {
+ DPAA2_PMD_ERR("Error to configure %s to recycle port.",
+ dev->data->name);
+
+ return ret;
+ }
+ } else {
+ /** User may disable loopback mode by calling
+ * "dev_configure" with lpbk_mode cleared.
+ * No matter the port was configured recycle or not,
+ * recycle de-configure is called here.
+ * If port is not recycled, the de-configure will return directly.
+ */
+ ret = dpaa2_dev_recycle_deconfig(dev);
+ if (ret) {
+ DPAA2_PMD_ERR("Error to de-configure recycle port %s.",
+ dev->data->name);
+
+ return ret;
+ }
+ }
+
dpaa2_tm_init(dev);
return 0;
@@ -2601,6 +2625,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
return -1;
}
+ if (eth_dev->data->dev_conf.lpbk_mode)
+ dpaa2_dev_recycle_deconfig(eth_dev);
+
/* Clean the device first */
ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
if (ret) {
@@ -2624,6 +2651,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
priv->dist_queues = attr.num_queues;
priv->num_channels = attr.num_channels;
priv->channel_inuse = 0;
+ rte_spinlock_init(&priv->lpbk_qp_lock);
/* only if the custom CG is enabled */
if (attr.options & DPNI_OPT_CUSTOM_CG)
@@ -2808,7 +2836,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
return ret;
}
}
- RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
+ RTE_LOG(INFO, PMD, "%s: netdev created, connected to %s\n",
+ eth_dev->data->name, dpaa2_dev->ep_name);
+
return 0;
init_err:
dpaa2_dev_close(eth_dev);
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index bd33a22a8e..b032da9eff 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -11,6 +11,7 @@
#include <rte_event_eth_rx_adapter.h>
#include <rte_pmd_dpaa2.h>
+#include <rte_fslmc.h>
#include <dpaa2_hw_pvt.h>
#include "dpaa2_tm.h"
@@ -65,6 +66,18 @@
/* Tx confirmation enabled */
#define DPAA2_TX_CONF_ENABLE 0x06
+/* HW loopback the egress traffic to self ingress*/
+#define DPAA2_TX_MAC_LOOPBACK_MODE 0x20
+
+#define DPAA2_TX_SERDES_LOOPBACK_MODE 0x40
+
+#define DPAA2_TX_DPNI_LOOPBACK_MODE 0x80
+
+#define DPAA2_TX_LOOPBACK_MODE \
+ (DPAA2_TX_MAC_LOOPBACK_MODE | \
+ DPAA2_TX_SERDES_LOOPBACK_MODE | \
+ DPAA2_TX_DPNI_LOOPBACK_MODE)
+
#define DPAA2_RSS_OFFLOAD_ALL ( \
RTE_ETH_RSS_L2_PAYLOAD | \
RTE_ETH_RSS_IP | \
@@ -192,6 +205,7 @@ struct dpaa2_dev_priv {
struct dpaa2_queue *next_tx_conf_queue;
struct rte_eth_dev *eth_dev; /**< Pointer back to holding ethdev */
+ rte_spinlock_t lpbk_qp_lock;
uint8_t channel_inuse;
LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
@@ -268,4 +282,13 @@ int dpaa2_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
uint32_t flags __rte_unused);
int dpaa2_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
struct timespec *timestamp);
+
+int dpaa2_dev_recycle_config(struct rte_eth_dev *eth_dev);
+int dpaa2_dev_recycle_deconfig(struct rte_eth_dev *eth_dev);
+int dpaa2_dev_recycle_qp_setup(struct rte_dpaa2_device *dpaa2_dev,
+ uint16_t qidx, uint64_t cntx,
+ eth_rx_burst_t tx_lpbk, eth_tx_burst_t rx_lpbk,
+ struct dpaa2_queue **txq,
+ struct dpaa2_queue **rxq);
+
#endif /* _DPAA2_ETHDEV_H */
diff --git a/drivers/net/dpaa2/dpaa2_recycle.c b/drivers/net/dpaa2/dpaa2_recycle.c
new file mode 100644
index 0000000000..e274d24ead
--- /dev/null
+++ b/drivers/net/dpaa2/dpaa2_recycle.c
@@ -0,0 +1,780 @@
+/* * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2019-2021 NXP
+ *
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_fslmc.h>
+#include <rte_flow_driver.h>
+
+#include "dpaa2_pmd_logs.h"
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_mempool.h>
+#include <dpaa2_hw_dpio.h>
+#include <mc/fsl_dpmng.h>
+#include "dpaa2_ethdev.h"
+#include "dpaa2_sparser.h"
+#include <fsl_qbman_debug.h>
+
+#include <rte_io.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#define LSX_SERDES_LAN_NB 8
+#define LSX_SERDES_REG_BASE 0x1ea0000
+#define LSX_LB_EN_BIT 0x10000000
+
+#define CONFIG_SYS_IMMR 0x01000000
+
+#define CONFIG_SYS_FSL_GUTS_ADDR (CONFIG_SYS_IMMR + 0x00E00000)
+#define CONFIG_SYS_FSL_SERDES_ADDR (CONFIG_SYS_IMMR + 0xEA0000)
+
+#define FSL_LX_SRDS1_PRTCL_SHIFT 16
+#define FSL_LX_SRDS2_PRTCL_SHIFT 21
+#define FSL_LX_SRDS3_PRTCL_SHIFT 26
+
+#define FSL_LS_SRDS1_PRTCL_SHIFT 16
+#define FSL_LS_SRDS2_PRTCL_SHIFT 0
+
+#define FSL_LX_SRDS1_REGSR 29
+#define FSL_LX_SRDS2_REGSR 29
+#define FSL_LX_SRDS3_REGSR 29
+
+#define FSL_LS_SRDS1_REGSR 29
+#define FSL_LS_SRDS2_REGSR 30
+
+#define FSL_LX_SRDS1_PRTCL_MASK 0x001F0000
+#define FSL_LX_SRDS2_PRTCL_MASK 0x03E00000
+#define FSL_LX_SRDS3_PRTCL_MASK 0x7C000000
+
+#define FSL_LS_SRDS1_PRTCL_MASK 0xFFFF0000
+#define FSL_LS_SRDS2_PRTCL_MASK 0x0000FFFF
+
+struct ccsr_lx_serdes_lan {
+ uint8_t unused1[0xa0];
+ uint32_t lnatcsr0;
+ uint8_t unused2[0x100 - 0xa4];
+} __rte_packed;
+
+struct ccsr_lx_serdes {
+ uint8_t unused0[0x800];
+ struct ccsr_lx_serdes_lan lane[LSX_SERDES_LAN_NB];
+} __rte_packed;
+
+struct ccsr_ls_serdes {
+ uint8_t unused[0x800];
+ struct serdes_lane {
+ uint32_t gcr0; /* General Control Register 0 */
+ uint32_t gcr1; /* General Control Register 1 */
+ uint32_t gcr2; /* General Control Register 2 */
+ uint32_t ssc0; /* Speed Switch Control 0 */
+ uint32_t rec0; /* Receive Equalization Control 0 */
+ uint32_t rec1; /* Receive Equalization Control 1 */
+ uint32_t tec0; /* Transmit Equalization Control 0 */
+ uint32_t ssc1; /* Speed Switch Control 1 */
+ uint32_t ttlc;
+ uint32_t rev[6];
+ uint32_t tsc3;
+ } lane[LSX_SERDES_LAN_NB];
+ uint8_t res5[0x19fc - 0xa00];
+} __rte_packed;
+
+struct ccsr_gur {
+ uint32_t porsr1; /* POR status 1 */
+ uint32_t porsr2; /* POR status 2 */
+ uint8_t res_008[0x20 - 0x8];
+ uint32_t gpporcr1; /* General-purpose POR configuration */
+ uint32_t gpporcr2; /* General-purpose POR configuration 2 */
+ uint32_t gpporcr3;
+ uint32_t gpporcr4;
+ uint8_t res_030[0x60 - 0x30];
+ uint32_t dcfg_fusesr; /* Fuse status register */
+ uint8_t res_064[0x70 - 0x64];
+ uint32_t devdisr; /* Device disable control 1 */
+ uint32_t devdisr2; /* Device disable control 2 */
+ uint32_t devdisr3; /* Device disable control 3 */
+ uint32_t devdisr4; /* Device disable control 4 */
+ uint32_t devdisr5; /* Device disable control 5 */
+ uint32_t devdisr6; /* Device disable control 6 */
+ uint8_t res_088[0x94 - 0x88];
+ uint32_t coredisr; /* Device disable control 7 */
+ uint8_t res_098[0xa0 - 0x98];
+ uint32_t pvr; /* Processor version */
+ uint32_t svr; /* System version */
+ uint8_t res_0a8[0x100 - 0xa8];
+ uint32_t rcwsr[30]; /* Reset control word status */
+
+ uint8_t res_178[0x200 - 0x178];
+ uint32_t scratchrw[16]; /* Scratch Read/Write */
+ uint8_t res_240[0x300 - 0x240];
+ uint32_t scratchw1r[4]; /* Scratch Read (Write once) */
+ uint8_t res_310[0x400 - 0x310];
+ uint32_t bootlocptrl; /* Boot location pointer low-order addr */
+ uint32_t bootlocptrh; /* Boot location pointer high-order addr */
+ uint8_t res_408[0x520 - 0x408];
+ uint32_t usb1_amqr;
+ uint32_t usb2_amqr;
+ uint8_t res_528[0x530 - 0x528]; /* add more registers when needed */
+ uint32_t sdmm1_amqr;
+ uint32_t sdmm2_amqr;
+ uint8_t res_538[0x550 - 0x538]; /* add more registers when needed */
+ uint32_t sata1_amqr;
+ uint32_t sata2_amqr;
+ uint32_t sata3_amqr;
+ uint32_t sata4_amqr;
+ uint8_t res_560[0x570 - 0x560]; /* add more registers when needed */
+ uint32_t misc1_amqr;
+ uint8_t res_574[0x590 - 0x574]; /* add more registers when needed */
+ uint32_t spare1_amqr;
+ uint32_t spare2_amqr;
+ uint32_t spare3_amqr;
+ uint8_t res_59c[0x620 - 0x59c]; /* add more registers when needed */
+ uint32_t gencr[7]; /* General Control Registers */
+ uint8_t res_63c[0x640 - 0x63c]; /* add more registers when needed */
+ uint32_t cgensr1; /* Core General Status Register */
+ uint8_t res_644[0x660 - 0x644]; /* add more registers when needed */
+ uint32_t cgencr1; /* Core General Control Register */
+ uint8_t res_664[0x740 - 0x664]; /* add more registers when needed */
+ uint32_t tp_ityp[64]; /* Topology Initiator Type Register */
+ struct {
+ uint32_t upper;
+ uint32_t lower;
+ } tp_cluster[4]; /* Core cluster n Topology Register */
+ uint8_t res_864[0x920 - 0x864]; /* add more registers when needed */
+ uint32_t ioqoscr[8]; /*I/O Quality of Services Register */
+ uint32_t uccr;
+ uint8_t res_944[0x960 - 0x944]; /* add more registers when needed */
+ uint32_t ftmcr;
+ uint8_t res_964[0x990 - 0x964]; /* add more registers when needed */
+ uint32_t coredisablesr;
+ uint8_t res_994[0xa00 - 0x994]; /* add more registers when needed */
+ uint32_t sdbgcr; /*Secure Debug Configuration Register */
+ uint8_t res_a04[0xbf8 - 0xa04]; /* add more registers when needed */
+ uint32_t ipbrr1;
+ uint32_t ipbrr2;
+ uint8_t res_858[0x1000 - 0xc00];
+} __rte_packed;
+
+static void *lsx_ccsr_map_region(uint64_t addr, size_t len)
+{
+ int fd;
+ void *tmp;
+ uint64_t start;
+ uint64_t offset;
+
+ fd = open("/dev/mem", O_RDWR);
+ if (fd < 0) {
+ DPAA2_PMD_ERR("Fail to open /dev/mem");
+ return NULL;
+ }
+
+ start = addr & PAGE_MASK;
+ offset = addr - start;
+ len = len & PAGE_MASK;
+ if (len < (size_t)PAGE_SIZE)
+ len = PAGE_SIZE;
+
+ tmp = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, start);
+
+ close(fd);
+
+ if (tmp != MAP_FAILED)
+ return (uint8_t *)tmp + offset;
+ else
+ return NULL;
+}
+
+static const uint8_t ls_sd1_prot_idx_map[] = {
+ 0x03, 0x05, 0x07, 0x09, 0x0a, 0x0c, 0x0e,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c,
+ 0x1e, 0x20, 0x22, 0x24, 0x26, 0x28, 0x2a,
+ 0x2b, 0x2d, 0x2e, 0x30, 0x32, 0x33, 0x35,
+ 0x37, 0x39, 0x3b, 0x4b, 0x4c, 0x4d, 0x58
+};
+
+static const uint8_t ls_sd2_prot_idx_map[] = {
+ 0x07, 0x09, 0x0a, 0x0c, 0x0e, 0x10, 0x12,
+ 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 0x20,
+ 0x22, 0x24, 0x3d, 0x3f, 0x41, 0x43, 0x45,
+ 0x47, 0x49, 0x4f, 0x50, 0x51, 0x52, 0x53,
+ 0x54, 0x55, 0x56, 0x57
+};
+
+static const uint8_t ls_sd1_eth_loopback_support[][LSX_SERDES_LAN_NB] = {
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x03*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x05*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x07*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x09*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0a*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0c*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0e*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x10*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x12*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x14*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x16*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x18*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1a*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1c*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1e*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x20*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x22*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x24*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x26*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x28*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x2a*/
+
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 0x2b*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 0x2d*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 0x2e*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 0x30*/
+
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x32*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x33*/
+
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x35*/
+ {1, 1, 0, 0, 0, 0, 0, 0}, /* 0x37*/
+
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 0x39*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 0x3b*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x4b*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 0x4c*/
+ {0, 0, 1, 1, 0, 0, 1, 1}, /* 0x4d*/
+ {0, 0, 0, 0, 0, 0, 1, 1} /* 0x58*/
+};
+
+static const uint8_t ls_sd2_eth_loopback_support[][LSX_SERDES_LAN_NB] = {
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x07*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x09*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0a*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0c*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x0e*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x10*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x12*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x14*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x16*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x18*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1a*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1c*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x1e*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x20*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x22*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 0x24*/
+
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x3d*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x3f*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x41*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x43*/
+
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x45*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 0x47*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x49*/
+
+ {0, 0, 1, 1, 0, 0, 1, 1}, /* 0x4f*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x50*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0x51*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 0x52*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 0x53*/
+ {0, 0, 1, 1, 0, 0, 1, 1}, /* 0x54*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 0x55*/
+ {0, 0, 1, 1, 0, 0, 1, 1}, /* 0x56*/
+ {0, 0, 0, 0, 0, 0, 1, 1} /* 0x57*/
+};
+
+enum lsx_serdes_id {
+ LSX_SERDES_1 = 1,
+ LSX_SERDES_2 = 2
+};
+
+static const uint8_t lx_sd1_loopback_support[][LSX_SERDES_LAN_NB] = {
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 1 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 2 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 3 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 4 prot*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 5 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 6 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 7 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 8 prot*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 9 prot*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 10 prot*/
+ {0, 0, 1, 1, 0, 0, 1, 1}, /* 11 prot*/
+ {0, 0, 0, 0, 0, 0, 1, 1}, /* 12 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 13 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 14 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 15 prot*/
+ {0, 0, 1, 1, 0, 0, 0, 0}, /* 16 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 17 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 18 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 19 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 20 prot*/
+ {1, 1, 1, 1, 0, 0, 1, 1}, /* 21 prot*/
+ {1, 1, 1, 1, 0, 0, 1, 1} /* 22 prot*/
+};
+
+static const uint8_t lx_sd2_loopback_support[][LSX_SERDES_LAN_NB] = {
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 0 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 1 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 2 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 3 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 4 prot*/
+ {0, 0, 0, 0, 0, 0, 0, 0}, /* 5 prot*/
+ {0, 0, 0, 0, 1, 1, 1, 1}, /* 6 prot*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 7 prot*/
+ {0, 0, 0, 0, 0, 0, 1, 1}, /* 8 prot*/
+ {1, 1, 1, 1, 1, 1, 1, 1}, /* 9 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 10 prot*/
+ {0, 1, 1, 1, 0, 1, 1, 1}, /* 11 prot*/
+ {1, 1, 1, 1, 0, 0, 0, 0}, /* 12 prot*/
+ {0, 0, 0, 0, 0, 0, 1, 1}, /* 13 prot*/
+ {0, 0, 1, 1, 0, 0, 1, 1} /* 14 prot*/
+};
+
+static inline int
+ls_mac_to_serdes_id(uint8_t mac_id)
+{
+ if (mac_id >= 1 && mac_id <= 8)
+ return LSX_SERDES_1;
+ if (mac_id >= 9 && mac_id <= 16)
+ return LSX_SERDES_2;
+
+ return -1;
+}
+
+static inline int
+lx_mac_to_serdes_id(uint8_t mac_id)
+{
+ if (mac_id >= 1 && mac_id <= 10)
+ return LSX_SERDES_1;
+ if (mac_id >= 11 && mac_id <= 18)
+ return LSX_SERDES_2;
+
+ return -1;
+}
+
+static inline int
+ls_serdes_cfg_to_idx(uint8_t sd_cfg, int sd_id)
+{
+ int i;
+
+ if (sd_id == LSX_SERDES_1) {
+ for (i = 0; i < (int)sizeof(ls_sd1_prot_idx_map); i++) {
+ if (ls_sd1_prot_idx_map[i] == sd_cfg)
+ return i;
+ }
+ } else if (sd_id == LSX_SERDES_2) {
+ for (i = 0; i < (int)sizeof(ls_sd2_prot_idx_map); i++) {
+ if (ls_sd2_prot_idx_map[i] == sd_cfg)
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static inline int
+lx_serdes_cfg_to_idx(uint8_t sd_cfg, int sd_id __rte_unused)
+{
+ return sd_cfg;
+}
+
+static inline int
+ls_mac_serdes_lpbk_support(uint16_t mac_id,
+ uint16_t *serdes_id, uint16_t *lan_id)
+{
+ struct ccsr_gur *gur_base =
+ lsx_ccsr_map_region(CONFIG_SYS_FSL_GUTS_ADDR,
+ sizeof(struct ccsr_gur) / 64 * 64 + 64);
+ uint32_t sd_cfg;
+ int sd_id, sd_idx;
+ uint16_t lan_id_tmp = 0;
+ const uint8_t *ls_sd_loopback_support;
+
+ sd_id = ls_mac_to_serdes_id(mac_id);
+
+ if (sd_id == LSX_SERDES_1) {
+ sd_cfg = rte_read32(&gur_base->rcwsr[FSL_LS_SRDS1_REGSR - 1]) &
+ FSL_LS_SRDS1_PRTCL_MASK;
+ sd_cfg >>= FSL_LS_SRDS1_PRTCL_SHIFT;
+ } else if (sd_id == LSX_SERDES_2) {
+ sd_cfg = rte_read32(&gur_base->rcwsr[FSL_LS_SRDS2_REGSR - 1]) &
+ FSL_LS_SRDS2_PRTCL_MASK;
+ sd_cfg >>= FSL_LS_SRDS2_PRTCL_SHIFT;
+ } else {
+ return false;
+ }
+ sd_cfg = sd_cfg & 0xff;
+
+ sd_idx = ls_serdes_cfg_to_idx(sd_cfg, sd_id);
+ if (sd_idx < 0) {
+ DPAA2_PMD_ERR("Serdes protocol(0x%02x) does not exist\n",
+ sd_cfg);
+ return false;
+ }
+
+ if (sd_id == LSX_SERDES_1) {
+ ls_sd_loopback_support =
+ &ls_sd1_eth_loopback_support[sd_idx][0];
+ } else {
+ ls_sd_loopback_support =
+ &ls_sd2_eth_loopback_support[sd_idx][0];
+ }
+
+ if (sd_id == LSX_SERDES_1)
+ lan_id_tmp = (mac_id - 1);
+ else
+ lan_id_tmp = (mac_id - 9);
+
+ if (lan_id_tmp >= LSX_SERDES_LAN_NB) {
+ DPAA2_PMD_ERR("Invalid serdes lan(%d).", lan_id_tmp);
+ return false;
+ }
+
+ if (!ls_sd_loopback_support[lan_id_tmp])
+ return false;
+
+ if (lan_id)
+ *lan_id = lan_id_tmp;
+ if (serdes_id)
+ *serdes_id = sd_id;
+
+ return true;
+}
+
+static inline int
+lx_mac_serdes_lpbk_support(uint16_t mac_id,
+ uint16_t *serdes_id, uint16_t *lan_id)
+{
+ struct ccsr_gur *gur_base =
+ lsx_ccsr_map_region(CONFIG_SYS_FSL_GUTS_ADDR,
+ sizeof(struct ccsr_gur) / 64 * 64 + 64);
+ uint32_t sd_cfg;
+ int sd_id, sd_idx;
+ uint16_t lan_id_tmp = 0;
+ const uint8_t *lx_sd_loopback_support;
+
+ sd_id = lx_mac_to_serdes_id(mac_id);
+
+ if (sd_id == LSX_SERDES_1) {
+ sd_cfg = rte_read32(&gur_base->rcwsr[FSL_LX_SRDS1_REGSR - 1]) &
+ FSL_LX_SRDS1_PRTCL_MASK;
+ sd_cfg >>= FSL_LX_SRDS1_PRTCL_SHIFT;
+ } else if (sd_id == LSX_SERDES_2) {
+ sd_cfg = rte_read32(&gur_base->rcwsr[FSL_LX_SRDS2_REGSR - 1]) &
+ FSL_LX_SRDS2_PRTCL_MASK;
+ sd_cfg >>= FSL_LX_SRDS2_PRTCL_SHIFT;
+ } else {
+ return false;
+ }
+ sd_cfg = sd_cfg & 0xff;
+
+ sd_idx = lx_serdes_cfg_to_idx(sd_cfg, sd_id);
+ if (sd_idx < 0)
+ return false;
+
+ if (sd_id == LSX_SERDES_1)
+ lx_sd_loopback_support = &lx_sd1_loopback_support[sd_idx][0];
+ else
+ lx_sd_loopback_support = &lx_sd2_loopback_support[sd_idx][0];
+
+ if (sd_id == LSX_SERDES_1) {
+ if (mac_id == 1)
+ lan_id_tmp = 0;
+ else if (mac_id == 2)
+ lan_id_tmp = 4;
+ else
+ lan_id_tmp = (mac_id - 3);
+ } else {
+ if (mac_id == 11)
+ lan_id_tmp = 0;
+ else if (mac_id == 12)
+ lan_id_tmp = 1;
+ else if (mac_id == 13)
+ lan_id_tmp = 6;
+ else if (mac_id == 14)
+ lan_id_tmp = 7;
+ else if (mac_id == 15)
+ lan_id_tmp = 4;
+ else if (mac_id == 16)
+ lan_id_tmp = 5;
+ else if (mac_id == 17)
+ lan_id_tmp = 2;
+ else if (mac_id == 18)
+ lan_id_tmp = 3;
+ else
+ return false;
+ }
+
+ if (lan_id_tmp >= LSX_SERDES_LAN_NB)
+ return false;
+
+ if (!lx_sd_loopback_support[lan_id_tmp])
+ return false;
+
+ if (lan_id)
+ *lan_id = lan_id_tmp;
+ if (serdes_id)
+ *serdes_id = sd_id;
+
+ return true;
+}
+
+static inline int
+ls_serdes_eth_lpbk(uint16_t mac_id, int en)
+{
+ uint16_t serdes_id, lan_id;
+ int ret;
+ uint32_t data;
+ struct ccsr_ls_serdes *serdes_base;
+ void *reg = 0;
+
+ ret = ls_mac_serdes_lpbk_support(mac_id, &serdes_id, &lan_id);
+ if (!ret)
+ return -ENOTSUP;
+
+ serdes_base = lsx_ccsr_map_region(CONFIG_SYS_FSL_SERDES_ADDR +
+ (serdes_id - LSX_SERDES_1) * 0x10000,
+ sizeof(struct ccsr_ls_serdes) / 64 * 64 + 64);
+ if (!serdes_base) {
+ DPAA2_PMD_ERR("Serdes register map failed\n");
+ return -ENOMEM;
+ }
+
+ if (serdes_id == LSX_SERDES_1)
+ lan_id = LSX_SERDES_LAN_NB - lan_id - 1;
+
+ reg = &serdes_base->lane[lan_id].tsc3;
+
+ data = rte_read32(reg);
+ if (en)
+ rte_write32(data | LSX_LB_EN_BIT, reg);
+ else
+ rte_write32(data & (~LSX_LB_EN_BIT), reg);
+
+ return 0;
+}
+
+static inline int
+lx_serdes_eth_lpbk(uint16_t mac_id, int en)
+{
+ uint16_t serdes_id = 0xffff, lan_id = 0xffff;
+ int ret;
+ uint32_t data;
+ struct ccsr_lx_serdes *serdes_base;
+ void *reg = 0;
+
+ ret = lx_mac_serdes_lpbk_support(mac_id, &serdes_id, &lan_id);
+ if (!ret)
+ return -ENOTSUP;
+
+ serdes_base = lsx_ccsr_map_region(CONFIG_SYS_FSL_SERDES_ADDR +
+ (serdes_id - LSX_SERDES_1) * 0x10000,
+ sizeof(struct ccsr_lx_serdes) / 64 * 64 + 64);
+ if (!serdes_base) {
+ DPAA2_PMD_ERR("Serdes register map failed\n");
+ return -ENOMEM;
+ }
+
+ if (serdes_id == LSX_SERDES_1)
+ lan_id = LSX_SERDES_LAN_NB - lan_id - 1;
+
+ reg = &serdes_base->lane[lan_id].lnatcsr0;
+
+ data = rte_read32(reg);
+ if (en)
+ rte_write32(data | LSX_LB_EN_BIT, reg);
+ else
+ rte_write32(data & (~LSX_LB_EN_BIT), reg);
+
+ return 0;
+}
+
+/* Configure dpaa2 port as recycle port */
+int
+dpaa2_dev_recycle_config(struct rte_eth_dev *eth_dev)
+{
+ struct rte_device *dev = eth_dev->device;
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct rte_dpaa2_device *dpaa2_dev =
+ container_of(dev, struct rte_dpaa2_device, device);
+ struct fsl_mc_io *dpni_dev = eth_dev->process_private;
+ struct dpni_port_cfg port_cfg;
+ int ret;
+
+ if (priv->flags & DPAA2_TX_LOOPBACK_MODE) {
+ DPAA2_PMD_INFO("%s has been configured recycle device.",
+ eth_dev->data->name);
+
+ return 0;
+ }
+
+ if (dpaa2_dev->ep_dev_type == DPAA2_MAC) {
+ /** For dpmac-dpni connection,
+ * try setting serdes loopback as recycle device at first.
+ */
+ if (dpaa2_svr_family == SVR_LS2088A) {
+ ret = ls_serdes_eth_lpbk(dpaa2_dev->ep_object_id, 1);
+ if (!ret) {
+ priv->flags |= DPAA2_TX_SERDES_LOOPBACK_MODE;
+ return 0;
+ }
+ } else if (dpaa2_svr_family == SVR_LX2160A) {
+ ret = lx_serdes_eth_lpbk(dpaa2_dev->ep_object_id, 1);
+ if (!ret) {
+ priv->flags |= DPAA2_TX_SERDES_LOOPBACK_MODE;
+ return 0;
+ }
+ } else {
+ DPAA2_PMD_DEBUG("Serdes loopback not support SoC(0x%08x)",
+ dpaa2_svr_family);
+ }
+
+ /** If serdes loopback is not supported for this mac,
+ * try setting MAC loopback instead.
+ */
+
+ port_cfg.loopback_en = 1;
+ ret = dpni_set_port_cfg(dpni_dev, CMD_PRI_LOW,
+ priv->token,
+ DPNI_PORT_CFG_LOOPBACK,
+ &port_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error(%d) to enable loopback", ret);
+ return -ENOTSUP;
+ }
+
+ priv->flags |= DPAA2_TX_MAC_LOOPBACK_MODE;
+
+ return 0;
+ }
+
+ if (dpaa2_dev->ep_dev_type == DPAA2_ETH &&
+ dpaa2_dev->object_id == dpaa2_dev->ep_object_id) {
+ priv->flags |= DPAA2_TX_DPNI_LOOPBACK_MODE;
+
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+int
+dpaa2_dev_recycle_deconfig(struct rte_eth_dev *eth_dev)
+{
+ struct rte_device *dev = eth_dev->device;
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct rte_dpaa2_device *dpaa2_dev =
+ container_of(dev, struct rte_dpaa2_device, device);
+ struct fsl_mc_io *dpni_dev = eth_dev->process_private;
+ struct dpni_port_cfg port_cfg;
+ int ret = 0;
+
+ if (!(priv->flags & DPAA2_TX_LOOPBACK_MODE))
+ return 0;
+
+ if (priv->flags & DPAA2_TX_SERDES_LOOPBACK_MODE) {
+ if (dpaa2_svr_family == SVR_LS2088A) {
+ ret = ls_serdes_eth_lpbk(dpaa2_dev->ep_object_id, 0);
+ if (ret) {
+ DPAA2_PMD_WARN("Error(%d) to disable Serdes loopback",
+ ret);
+ } else {
+ priv->flags &= ~DPAA2_TX_SERDES_LOOPBACK_MODE;
+ }
+ } else if (dpaa2_svr_family == SVR_LX2160A) {
+ ret = lx_serdes_eth_lpbk(dpaa2_dev->ep_object_id, 0);
+ if (ret) {
+ DPAA2_PMD_WARN("Error(%d) to disable Serdes loopback",
+ ret);
+ } else {
+ priv->flags &= ~DPAA2_TX_SERDES_LOOPBACK_MODE;
+ }
+ } else {
+ DPAA2_PMD_DEBUG("Serdes loopback not support SoC(0x%08x)",
+ dpaa2_svr_family);
+ }
+ }
+
+ if (priv->flags & DPAA2_TX_MAC_LOOPBACK_MODE) {
+ port_cfg.loopback_en = 0;
+ ret = dpni_set_port_cfg(dpni_dev, CMD_PRI_LOW,
+ priv->token,
+ DPNI_PORT_CFG_LOOPBACK,
+ &port_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("Error(%d) to disable TX mac loopback",
+ ret);
+ } else {
+ priv->flags &= ~DPAA2_TX_MAC_LOOPBACK_MODE;
+ }
+ }
+
+ if (priv->flags & DPAA2_TX_DPNI_LOOPBACK_MODE)
+ priv->flags &= ~DPAA2_TX_DPNI_LOOPBACK_MODE;
+
+ return ret;
+}
+
+int
+dpaa2_dev_recycle_qp_setup(struct rte_dpaa2_device *dpaa2_dev,
+ uint16_t qidx, uint64_t cntx,
+ eth_rx_burst_t tx_lpbk, eth_tx_burst_t rx_lpbk,
+ struct dpaa2_queue **txq,
+ struct dpaa2_queue **rxq)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_data *data;
+ struct dpaa2_queue *txq_tmp;
+ struct dpaa2_queue *rxq_tmp;
+ struct dpaa2_dev_priv *priv;
+
+ dev = dpaa2_dev->eth_dev;
+ data = dev->data;
+ priv = data->dev_private;
+
+ if (!(priv->flags & DPAA2_TX_LOOPBACK_MODE) &&
+ (tx_lpbk || rx_lpbk)) {
+ DPAA2_PMD_ERR("%s is NOT recycle device!", data->name);
+
+ return -EINVAL;
+ }
+
+ if (qidx >= data->nb_rx_queues || qidx >= data->nb_tx_queues)
+ return -EINVAL;
+
+ rte_spinlock_lock(&priv->lpbk_qp_lock);
+
+ if (tx_lpbk)
+ dev->tx_pkt_burst = tx_lpbk;
+
+ if (rx_lpbk)
+ dev->rx_pkt_burst = rx_lpbk;
+
+ txq_tmp = data->tx_queues[qidx];
+ txq_tmp->lpbk_cntx = cntx;
+ rxq_tmp = data->rx_queues[qidx];
+ rxq_tmp->lpbk_cntx = cntx;
+
+ if (txq)
+ *txq = txq_tmp;
+ if (rxq)
+ *rxq = rxq_tmp;
+
+ rte_spinlock_unlock(&priv->lpbk_qp_lock);
+
+ return 0;
+}
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index b7a65cb637..7a2bc15eb4 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -3087,3 +3087,35 @@ int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
return err;
}
+/**
+ * dpni_set_port_cfg() - performs configurations at physical port connected on
+ * this dpni. The command has effect only if dpni is connected to
+ * another dpni object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @flags: Valid fields from port_cfg structure
+ * @port_cfg: Configuration data; one or more of DPNI_PORT_CFG_
+ * The command can be called only when dpni is connected to a dpmac object. If
+ * the dpni is unconnected or the endpoint is not a dpni it will return error.
+ * If dpmac endpoint is disconnected the settings will be lost
+ */
+int dpni_set_port_cfg(struct fsl_mc_io *mc_io, uint32_t cmd_flags,
+ uint16_t token, uint32_t flags, struct dpni_port_cfg *port_cfg)
+{
+ struct dpni_cmd_set_port_cfg *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PORT_CFG,
+ cmd_flags, token);
+
+ cmd_params = (struct dpni_cmd_set_port_cfg *)cmd.params;
+ cmd_params->flags = cpu_to_le32(flags);
+ dpni_set_field(cmd_params->bit_params, PORT_LOOPBACK_EN,
+ !!port_cfg->loopback_en);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
index ed0bd7615a..b7bd7556af 100644
--- a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -119,6 +119,7 @@
#define DPNI_CMDID_REMOVE_CUSTOM_TPID DPNI_CMD(0x276)
#define DPNI_CMDID_GET_CUSTOM_TPID DPNI_CMD(0x277)
#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
+#define DPNI_CMDID_SET_PORT_CFG DPNI_CMD(0x27B)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
diff --git a/drivers/net/dpaa2/meson.build b/drivers/net/dpaa2/meson.build
index 21b827a259..51598c048c 100644
--- a/drivers/net/dpaa2/meson.build
+++ b/drivers/net/dpaa2/meson.build
@@ -14,6 +14,7 @@ sources = files(
'dpaa2_mux.c',
'dpaa2_ethdev.c',
'dpaa2_flow.c',
+ 'dpaa2_recycle.c',
'dpaa2_rxtx.c',
'dpaa2_sparser.c',
'dpaa2_ptp.c',
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* Re: [PATCH v3 10/15] net/dpaa2: support recycle loopback port
2022-01-03 10:01 ` [PATCH v3 10/15] net/dpaa2: support recycle loopback port nipun.gupta
@ 2022-02-01 9:27 ` David Marchand
2022-02-01 9:34 ` Nipun Gupta
0 siblings, 1 reply; 68+ messages in thread
From: David Marchand @ 2022-02-01 9:27 UTC (permalink / raw)
To: Nipun Gupta, Jun Yang
Cc: dev, Thomas Monjalon, Yigit, Ferruh, Hemant Agrawal, Stephen Hemminger
Hello guys,
On Mon, Jan 3, 2022 at 11:02 AM <nipun.gupta@nxp.com> wrote:
> diff --git a/drivers/net/dpaa2/dpaa2_recycle.c b/drivers/net/dpaa2/dpaa2_recycle.c
> new file mode 100644
> index 0000000000..e274d24ead
> --- /dev/null
> +++ b/drivers/net/dpaa2/dpaa2_recycle.c
> @@ -0,0 +1,780 @@
> +/* * SPDX-License-Identifier: BSD-3-Clause
> + *
> + * Copyright 2019-2021 NXP
> + *
> + */
> +
> +#include <time.h>
> +#include <net/if.h>
> +
> +#include <rte_mbuf.h>
> +#include <ethdev_driver.h>
> +#include <rte_malloc.h>
> +#include <rte_memcpy.h>
> +#include <rte_string_fns.h>
> +#include <rte_cycles.h>
> +#include <rte_kvargs.h>
> +#include <rte_dev.h>
> +#include <rte_fslmc.h>
> +#include <rte_flow_driver.h>
> +
> +#include "dpaa2_pmd_logs.h"
> +#include <fslmc_vfio.h>
> +#include <dpaa2_hw_pvt.h>
> +#include <dpaa2_hw_mempool.h>
> +#include <dpaa2_hw_dpio.h>
> +#include <mc/fsl_dpmng.h>
> +#include "dpaa2_ethdev.h"
> +#include "dpaa2_sparser.h"
> +#include <fsl_qbman_debug.h>
> +
> +#include <rte_io.h>
> +#include <unistd.h>
> +#include <sys/mman.h>
> +
> +#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
> +#define PAGE_MASK (~(PAGE_SIZE - 1))
This patch breaks compilation in Alpine Linux in next-net and main repositories.
Can you provide a fix?
Thanks.
This was initially reported by UNH:
http://mails.dpdk.org/archives/test-report/2022-January/250111.html
FAILED: drivers/a715181@@tmp_rte_net_dpaa2@sta/net_dpaa2_dpaa2_recycle.c.o
ccache cc -Idrivers/a715181@@tmp_rte_net_dpaa2@sta -Idrivers
-I../drivers -Idrivers/net/dpaa2 -I../drivers/net/dpaa2
-I../drivers/net/dpaa2/base -I../drivers/net/dpaa2/mc -Ilib/ethdev
-I../lib/ethdev -I. -I../ -Iconfig -I../config -Ilib/eal/include
-I../lib/eal/include -Ilib/eal/linux/include
-I../lib/eal/linux/include -Ilib/eal/x86/include
-I../lib/eal/x86/include -Ilib/eal/common -I../lib/eal/common
-Ilib/eal -I../lib/eal -Ilib/kvargs -I../lib/kvargs
-Ilib/telemetry/../metrics -I../lib/telemetry/../metrics
-Ilib/telemetry -I../lib/telemetry -Ilib/net -I../lib/net -Ilib/mbuf
-I../lib/mbuf -Ilib/mempool -I../lib/mempool -Ilib/ring -I../lib/ring
-Ilib/meter -I../lib/meter -Idrivers/bus/pci -I../drivers/bus/pci
-I../drivers/bus/pci/linux -Ilib/pci -I../lib/pci -Idrivers/bus/vdev
-I../drivers/bus/vdev -Idrivers/mempool/dpaa2
-I../drivers/mempool/dpaa2 -Idrivers/bus/fslmc -I../drivers/bus/fslmc
-I../drivers/bus/fslmc/mc -I../drivers/bus/fslmc/qbman/include
-I../drivers/bus/fslmc/portal -Idrivers/common/dpaax
-I../drivers/common/dpaax -I../drivers/common/dpaax/caamflib
-Ilib/eventdev -I../lib/eventdev -Ilib/hash -I../lib/hash -Ilib/rcu
-I../lib/rcu -Ilib/timer -I../lib/timer -Ilib/cryptodev
-I../lib/cryptodev -fdiagnostics-color=always -pipe
-D_FILE_OFFSET_BITS=64 -Wall -Winvalid-pch -Werror -O3 -include
rte_config.h -Wextra -Wcast-qual -Wdeprecated -Wformat
-Wformat-nonliteral -Wformat-security -Wmissing-declarations
-Wmissing-prototypes -Wnested-externs -Wold-style-definition
-Wpointer-arith -Wsign-compare -Wstrict-prototypes -Wundef
-Wwrite-strings -Wno-address-of-packed-member -Wno-packed-not-aligned
-Wno-missing-field-initializers -Wno-zero-length-bounds -D_GNU_SOURCE
-fPIC -march=native -DALLOW_EXPERIMENTAL_API -DALLOW_INTERNAL_API
-Wno-format-truncation -DRTE_LOG_DEFAULT_LOGTYPE=pmd.net.dpaa2 -MD
-MQ 'drivers/a715181@@tmp_rte_net_dpaa2@sta/net_dpaa2_dpaa2_recycle.c.o'
-MF 'drivers/a715181@@tmp_rte_net_dpaa2@sta/net_dpaa2_dpaa2_recycle.c.o.d'
-o 'drivers/a715181@@tmp_rte_net_dpaa2@sta/net_dpaa2_dpaa2_recycle.c.o'
-c ../drivers/net/dpaa2/dpaa2_recycle.c
../drivers/net/dpaa2/dpaa2_recycle.c:35: error: "PAGE_SIZE" redefined [-Werror]
35 | #define PAGE_SIZE (sysconf(_SC_PAGESIZE))
|
In file included from /usr/include/fortify/stdlib.h:29,
from ../lib/eal/include/rte_common.h:20,
from ../lib/mbuf/rte_mbuf.h:36,
from ../drivers/net/dpaa2/dpaa2_recycle.c:10:
/usr/include/limits.h:97: note: this is the location of the previous definition
97 | #define PAGE_SIZE PAGESIZE
|
cc1: all warnings being treated as errors
--
David Marchand
^ permalink raw reply [flat|nested] 68+ messages in thread
* RE: [PATCH v3 10/15] net/dpaa2: support recycle loopback port
2022-02-01 9:27 ` David Marchand
@ 2022-02-01 9:34 ` Nipun Gupta
2022-02-01 9:43 ` Thomas Monjalon
0 siblings, 1 reply; 68+ messages in thread
From: Nipun Gupta @ 2022-02-01 9:34 UTC (permalink / raw)
To: David Marchand, Jun Yang
Cc: dev, Thomas Monjalon, Yigit, Ferruh, Hemant Agrawal, Stephen Hemminger
Hi David,
Sure, we will send a patch asap.
Regards,
Nipun
> -----Original Message-----
> From: David Marchand <david.marchand@redhat.com>
> Sent: 01 February 2022 14:58
> To: Nipun Gupta <nipun.gupta@nxp.com>; Jun Yang <jun.yang@nxp.com>
> Cc: dev <dev@dpdk.org>; Thomas Monjalon <thomas@monjalon.net>; Yigit,
> Ferruh <ferruh.yigit@intel.com>; Hemant Agrawal <hemant.agrawal@nxp.com>;
> Stephen Hemminger <stephen@networkplumber.org>
> Subject: Re: [PATCH v3 10/15] net/dpaa2: support recycle loopback port
>
> Hello guys,
>
> On Mon, Jan 3, 2022 at 11:02 AM <nipun.gupta@nxp.com> wrote:
> > diff --git a/drivers/net/dpaa2/dpaa2_recycle.c
> b/drivers/net/dpaa2/dpaa2_recycle.c
> > new file mode 100644
> > index 0000000000..e274d24ead
> > --- /dev/null
> > +++ b/drivers/net/dpaa2/dpaa2_recycle.c
> > @@ -0,0 +1,780 @@
> > +/* * SPDX-License-Identifier: BSD-3-Clause
> > + *
> > + * Copyright 2019-2021 NXP
> > + *
> > + */
> > +
> > +#include <time.h>
> > +#include <net/if.h>
> > +
> > +#include <rte_mbuf.h>
> > +#include <ethdev_driver.h>
> > +#include <rte_malloc.h>
> > +#include <rte_memcpy.h>
> > +#include <rte_string_fns.h>
> > +#include <rte_cycles.h>
> > +#include <rte_kvargs.h>
> > +#include <rte_dev.h>
> > +#include <rte_fslmc.h>
> > +#include <rte_flow_driver.h>
> > +
> > +#include "dpaa2_pmd_logs.h"
> > +#include <fslmc_vfio.h>
> > +#include <dpaa2_hw_pvt.h>
> > +#include <dpaa2_hw_mempool.h>
> > +#include <dpaa2_hw_dpio.h>
> > +#include <mc/fsl_dpmng.h>
> > +#include "dpaa2_ethdev.h"
> > +#include "dpaa2_sparser.h"
> > +#include <fsl_qbman_debug.h>
> > +
> > +#include <rte_io.h>
> > +#include <unistd.h>
> > +#include <sys/mman.h>
> > +
> > +#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
> > +#define PAGE_MASK (~(PAGE_SIZE - 1))
>
> This patch breaks compilation in Alpine Linux in next-net and main repositories.
> Can you provide a fix?
> Thanks.
>
>
> This was initially reported by UNH:
> https://eur01.safelinks.protection.outlook.com/?url=http%3A%2F%2Fmails.dpd
> k.org%2Farchives%2Ftest-report%2F2022-
> January%2F250111.html&data=04%7C01%7Cnipun.gupta%40nxp.com%7C
> a8213a91a0014639650d08d9e5652600%7C686ea1d3bc2b4c6fa92cd99c5c3016
> 35%7C0%7C0%7C637793044866714334%7CUnknown%7CTWFpbGZsb3d8eyJWI
> joiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI6Mn0%3D%7C300
> 0&sdata=b%2BT8BinlyFWaYSLaaOLtNTN9TA0epJAxN0Si9jbom7I%3D&
> ;reserved=0
>
> FAILED:
> drivers/a715181@@tmp_rte_net_dpaa2@sta/net_dpaa2_dpaa2_recycle.c.o
> ccache cc -Idrivers/a715181@@tmp_rte_net_dpaa2@sta -Idrivers
> -I../drivers -Idrivers/net/dpaa2 -I../drivers/net/dpaa2
> -I../drivers/net/dpaa2/base -I../drivers/net/dpaa2/mc -Ilib/ethdev
> -I../lib/ethdev -I. -I../ -Iconfig -I../config -Ilib/eal/include
> -I../lib/eal/include -Ilib/eal/linux/include
> -I../lib/eal/linux/include -Ilib/eal/x86/include
> -I../lib/eal/x86/include -Ilib/eal/common -I../lib/eal/common
> -Ilib/eal -I../lib/eal -Ilib/kvargs -I../lib/kvargs
> -Ilib/telemetry/../metrics -I../lib/telemetry/../metrics
> -Ilib/telemetry -I../lib/telemetry -Ilib/net -I../lib/net -Ilib/mbuf
> -I../lib/mbuf -Ilib/mempool -I../lib/mempool -Ilib/ring -I../lib/ring
> -Ilib/meter -I../lib/meter -Idrivers/bus/pci -I../drivers/bus/pci
> -I../drivers/bus/pci/linux -Ilib/pci -I../lib/pci -Idrivers/bus/vdev
> -I../drivers/bus/vdev -Idrivers/mempool/dpaa2
> -I../drivers/mempool/dpaa2 -Idrivers/bus/fslmc -I../drivers/bus/fslmc
> -I../drivers/bus/fslmc/mc -I../drivers/bus/fslmc/qbman/include
> -I../drivers/bus/fslmc/portal -Idrivers/common/dpaax
> -I../drivers/common/dpaax -I../drivers/common/dpaax/caamflib
> -Ilib/eventdev -I../lib/eventdev -Ilib/hash -I../lib/hash -Ilib/rcu
> -I../lib/rcu -Ilib/timer -I../lib/timer -Ilib/cryptodev
> -I../lib/cryptodev -fdiagnostics-color=always -pipe
> -D_FILE_OFFSET_BITS=64 -Wall -Winvalid-pch -Werror -O3 -include
> rte_config.h -Wextra -Wcast-qual -Wdeprecated -Wformat
> -Wformat-nonliteral -Wformat-security -Wmissing-declarations
> -Wmissing-prototypes -Wnested-externs -Wold-style-definition
> -Wpointer-arith -Wsign-compare -Wstrict-prototypes -Wundef
> -Wwrite-strings -Wno-address-of-packed-member -Wno-packed-not-aligned
> -Wno-missing-field-initializers -Wno-zero-length-bounds -D_GNU_SOURCE
> -fPIC -march=native -DALLOW_EXPERIMENTAL_API -DALLOW_INTERNAL_API
> -Wno-format-truncation -DRTE_LOG_DEFAULT_LOGTYPE=pmd.net.dpaa2 -MD
> -MQ
> 'drivers/a715181@@tmp_rte_net_dpaa2@sta/net_dpaa2_dpaa2_recycle.c.o'
> -MF
> 'drivers/a715181@@tmp_rte_net_dpaa2@sta/net_dpaa2_dpaa2_recycle.c.o.d'
> -o
> 'drivers/a715181@@tmp_rte_net_dpaa2@sta/net_dpaa2_dpaa2_recycle.c.o'
> -c ../drivers/net/dpaa2/dpaa2_recycle.c
> ../drivers/net/dpaa2/dpaa2_recycle.c:35: error: "PAGE_SIZE" redefined [-Werror]
> 35 | #define PAGE_SIZE (sysconf(_SC_PAGESIZE))
> |
> In file included from /usr/include/fortify/stdlib.h:29,
> from ../lib/eal/include/rte_common.h:20,
> from ../lib/mbuf/rte_mbuf.h:36,
> from ../drivers/net/dpaa2/dpaa2_recycle.c:10:
> /usr/include/limits.h:97: note: this is the location of the previous definition
> 97 | #define PAGE_SIZE PAGESIZE
> |
> cc1: all warnings being treated as errors
>
>
>
> --
> David Marchand
^ permalink raw reply [flat|nested] 68+ messages in thread
* Re: [PATCH v3 10/15] net/dpaa2: support recycle loopback port
2022-02-01 9:34 ` Nipun Gupta
@ 2022-02-01 9:43 ` Thomas Monjalon
2022-02-01 9:53 ` [PATCH] net/dpaa2: fix build with musl Thomas Monjalon
0 siblings, 1 reply; 68+ messages in thread
From: Thomas Monjalon @ 2022-02-01 9:43 UTC (permalink / raw)
To: Jun Yang, Nipun Gupta
Cc: David Marchand, dev, Yigit, Ferruh, Hemant Agrawal, Stephen Hemminger
01/02/2022 10:34, Nipun Gupta:
>
> Hi David,
> Sure, we will send a patch asap.
Just need this:
#ifndef PAGE_SIZE
#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
#endif
> From: David Marchand <david.marchand@redhat.com>
> >
> > Hello guys,
> >
> > > +#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
> > > +#define PAGE_MASK (~(PAGE_SIZE - 1))
> >
> > This patch breaks compilation in Alpine Linux in next-net and main repositories.
That's a pity I missed it when pulling.
Please be more cautious with errors reported by the CI, thanks.
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH] net/dpaa2: fix build with musl
2022-02-01 9:43 ` Thomas Monjalon
@ 2022-02-01 9:53 ` Thomas Monjalon
2022-02-01 10:10 ` Nipun Gupta
0 siblings, 1 reply; 68+ messages in thread
From: Thomas Monjalon @ 2022-02-01 9:53 UTC (permalink / raw)
To: dev; +Cc: jun.yang, nipun.gupta, hemant.agrawal, David Marchand, Sachin Saxena
PAGE_SIZE is already defined in musl libc:
drivers/net/dpaa2/dpaa2_recycle.c:35: error: "PAGE_SIZE" redefined
/usr/include/limits.h:97: note:
this is the location of the previous definition
97 | #define PAGE_SIZE PAGESIZE
Fixes: f023d059769f ("net/dpaa2: support recycle loopback port")
Reported-by: David Marchand <david.marchand@redhat.com>
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
---
drivers/net/dpaa2/dpaa2_recycle.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/net/dpaa2/dpaa2_recycle.c b/drivers/net/dpaa2/dpaa2_recycle.c
index c5e9e9721d..336506dc0d 100644
--- a/drivers/net/dpaa2/dpaa2_recycle.c
+++ b/drivers/net/dpaa2/dpaa2_recycle.c
@@ -32,7 +32,9 @@
#include <unistd.h>
#include <sys/mman.h>
+#ifndef PAGE_SIZE
#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
+#endif
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define LSX_SERDES_LAN_NB 8
--
2.34.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* RE: [PATCH] net/dpaa2: fix build with musl
2022-02-01 9:53 ` [PATCH] net/dpaa2: fix build with musl Thomas Monjalon
@ 2022-02-01 10:10 ` Nipun Gupta
2022-02-01 11:03 ` Thomas Monjalon
0 siblings, 1 reply; 68+ messages in thread
From: Nipun Gupta @ 2022-02-01 10:10 UTC (permalink / raw)
To: Thomas Monjalon, dev
Cc: Jun Yang, Hemant Agrawal, David Marchand, Sachin Saxena (OSS)
Acked-by: Nipun Gupta <nipun.gupta@nxp.com>
> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: 01 February 2022 15:24
> To: dev@dpdk.org
> Cc: Jun Yang <jun.yang@nxp.com>; Nipun Gupta <nipun.gupta@nxp.com>;
> Hemant Agrawal <hemant.agrawal@nxp.com>; David Marchand
> <david.marchand@redhat.com>; Sachin Saxena (OSS)
> <sachin.saxena@oss.nxp.com>
> Subject: [PATCH] net/dpaa2: fix build with musl
>
> PAGE_SIZE is already defined in musl libc:
>
> drivers/net/dpaa2/dpaa2_recycle.c:35: error: "PAGE_SIZE" redefined
> /usr/include/limits.h:97: note:
> this is the location of the previous definition
> 97 | #define PAGE_SIZE PAGESIZE
>
> Fixes: f023d059769f ("net/dpaa2: support recycle loopback port")
>
> Reported-by: David Marchand <david.marchand@redhat.com>
> Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
> ---
> drivers/net/dpaa2/dpaa2_recycle.c | 2 ++
> 1 file changed, 2 insertions(+)
>
> diff --git a/drivers/net/dpaa2/dpaa2_recycle.c
> b/drivers/net/dpaa2/dpaa2_recycle.c
> index c5e9e9721d..336506dc0d 100644
> --- a/drivers/net/dpaa2/dpaa2_recycle.c
> +++ b/drivers/net/dpaa2/dpaa2_recycle.c
> @@ -32,7 +32,9 @@
> #include <unistd.h>
> #include <sys/mman.h>
>
> +#ifndef PAGE_SIZE
> #define PAGE_SIZE (sysconf(_SC_PAGESIZE))
> +#endif
> #define PAGE_MASK (~(PAGE_SIZE - 1))
>
> #define LSX_SERDES_LAN_NB 8
> --
> 2.34.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* Re: [PATCH] net/dpaa2: fix build with musl
2022-02-01 10:10 ` Nipun Gupta
@ 2022-02-01 11:03 ` Thomas Monjalon
0 siblings, 0 replies; 68+ messages in thread
From: Thomas Monjalon @ 2022-02-01 11:03 UTC (permalink / raw)
To: Nipun Gupta
Cc: dev, Jun Yang, Hemant Agrawal, David Marchand, Sachin Saxena (OSS)
> > PAGE_SIZE is already defined in musl libc:
> >
> > drivers/net/dpaa2/dpaa2_recycle.c:35: error: "PAGE_SIZE" redefined
> > /usr/include/limits.h:97: note:
> > this is the location of the previous definition
> > 97 | #define PAGE_SIZE PAGESIZE
> >
> > Fixes: f023d059769f ("net/dpaa2: support recycle loopback port")
> >
> > Reported-by: David Marchand <david.marchand@redhat.com>
> > Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
> Acked-by: Nipun Gupta <nipun.gupta@nxp.com>
Applied
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 11/15] net/dpaa: check status before configuring shared MAC
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
` (9 preceding siblings ...)
2022-01-03 10:01 ` [PATCH v3 10/15] net/dpaa2: support recycle loopback port nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-01-03 10:01 ` [PATCH v3 12/15] net/dpaa: enable checksum for shared MAC interface nipun.gupta
` (4 subsequent siblings)
15 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stephen, Nipun Gupta
From: Nipun Gupta <nipun.gupta@nxp.com>
For shared MAC interface, it is a prerequisite to enable the
interface in the kernel, before using it in user-space. This
patch makes sure that device is not getting configured in
case shared MAC interface is not enabled in the kernel.
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/bus/dpaa/base/fman/fman_hw.c | 11 +++++++++++
drivers/bus/dpaa/include/fsl_fman.h | 2 ++
drivers/bus/dpaa/version.map | 1 +
drivers/net/dpaa/dpaa_ethdev.c | 13 ++++++++++++-
4 files changed, 26 insertions(+), 1 deletion(-)
diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index af9bac76c2..24a99f7235 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -314,6 +314,17 @@ fman_if_disable_rx(struct fman_if *p)
out_be32(__if->ccsr_map + 8, in_be32(__if->ccsr_map + 8) & ~(u32)2);
}
+int
+fman_if_get_rx_status(struct fman_if *p)
+{
+ struct __fman_if *__if = container_of(p, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ /* return true if RX bit is set */
+ return !!(in_be32(__if->ccsr_map + 8) & (u32)2);
+}
+
void
fman_if_loopback_enable(struct fman_if *p)
{
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
index f3a5d05970..acb344584f 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -81,6 +81,8 @@ __rte_internal
void fman_if_enable_rx(struct fman_if *p);
__rte_internal
void fman_if_disable_rx(struct fman_if *p);
+__rte_internal
+int fman_if_get_rx_status(struct fman_if *p);
/* Enable/disable loopback on specific interfaces */
__rte_internal
diff --git a/drivers/bus/dpaa/version.map b/drivers/bus/dpaa/version.map
index 900635b210..1a840fd1a5 100644
--- a/drivers/bus/dpaa/version.map
+++ b/drivers/bus/dpaa/version.map
@@ -33,6 +33,7 @@ INTERNAL {
fman_if_get_fdoff;
fman_if_get_maxfrm;
fman_if_get_sg_enable;
+ fman_if_get_rx_status;
fman_if_loopback_disable;
fman_if_loopback_enable;
fman_if_promiscuous_disable;
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index e49f765434..3972ecaed8 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -195,6 +195,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
uint64_t rx_offloads = eth_conf->rxmode.offloads;
uint64_t tx_offloads = eth_conf->txmode.offloads;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
struct rte_device *rdev = dev->device;
struct rte_eth_link *link = &dev->data->dev_link;
struct rte_dpaa_device *dpaa_dev;
@@ -203,7 +204,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
struct rte_intr_handle *intr_handle;
uint32_t max_rx_pktlen;
int speed, duplex;
- int ret;
+ int ret, rx_status;
PMD_INIT_FUNC_TRACE();
@@ -211,6 +212,16 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
intr_handle = dpaa_dev->intr_handle;
__fif = container_of(fif, struct __fman_if, __if);
+ /* Check if interface is enabled in case of shared MAC */
+ if (fif->is_shared_mac) {
+ rx_status = fman_if_get_rx_status(fif);
+ if (!rx_status) {
+ DPAA_PMD_ERR("%s Interface not enabled in kernel!",
+ dpaa_intf->name);
+ return -EHOSTDOWN;
+ }
+ }
+
/* Rx offloads which are enabled by default */
if (dev_rx_offloads_nodis & ~rx_offloads) {
DPAA_PMD_INFO(
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 12/15] net/dpaa: enable checksum for shared MAC interface
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
` (10 preceding siblings ...)
2022-01-03 10:01 ` [PATCH v3 11/15] net/dpaa: check status before configuring shared MAC nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-01-03 10:01 ` [PATCH v3 13/15] net/enetc: add support for VFs nipun.gupta
` (3 subsequent siblings)
15 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev
Cc: thomas, ferruh.yigit, hemant.agrawal, stephen, Brick Yang, Nipun Gupta
From: Nipun Gupta <nipun.gupta@nxp.com>
In case of shared MAC, the B0V bit in contextA is required
to be set so that ASPID is 0.
Signed-off-by: Brick Yang <brick.yang@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/net/dpaa/dpaa_ethdev.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 3972ecaed8..7135a5998d 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -1755,6 +1755,10 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
/* no tx-confirmation */
opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
+ if (fman_ip_rev >= FMAN_V3) {
+ /* Set B0V bit in contextA to set ASPID to 0 */
+ opts.fqd.context_a.hi |= 0x04000000;
+ }
DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
if (cgr_tx) {
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 13/15] net/enetc: add support for VFs
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
` (11 preceding siblings ...)
2022-01-03 10:01 ` [PATCH v3 12/15] net/dpaa: enable checksum for shared MAC interface nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-01-03 10:01 ` [PATCH v3 14/15] net/pfe: reduce driver initialization time nipun.gupta
` (2 subsequent siblings)
15 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stephen, Gagandeep Singh
From: Gagandeep Singh <g.singh@nxp.com>
Add virtual function support for enetc devices
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/net/enetc/enetc_ethdev.c | 25 ++++++++++++++++++++-----
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/drivers/net/enetc/enetc_ethdev.c b/drivers/net/enetc/enetc_ethdev.c
index 7cdb8ce463..1b4337bc48 100644
--- a/drivers/net/enetc/enetc_ethdev.c
+++ b/drivers/net/enetc/enetc_ethdev.c
@@ -19,6 +19,9 @@ enetc_dev_start(struct rte_eth_dev *dev)
uint32_t val;
PMD_INIT_FUNC_TRACE();
+ if (hw->device_id == ENETC_DEV_ID_VF)
+ return 0;
+
val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
@@ -55,6 +58,9 @@ enetc_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
dev->data->dev_started = 0;
+ if (hw->device_id == ENETC_DEV_ID_VF)
+ return 0;
+
/* Disable port */
val = enetc_port_rd(enetc_hw, ENETC_PMR);
enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));
@@ -160,11 +166,20 @@ enetc_hardware_init(struct enetc_eth_hw *hw)
/* Enabling Station Interface */
enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);
- *mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
- high_mac = (uint32_t)*mac;
- mac++;
- *mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
- low_mac = (uint16_t)*mac;
+
+ if (hw->device_id == ENETC_DEV_ID_VF) {
+ *mac = (uint32_t)enetc_rd(enetc_hw, ENETC_SIPMAR0);
+ high_mac = (uint32_t)*mac;
+ mac++;
+ *mac = (uint32_t)enetc_rd(enetc_hw, ENETC_SIPMAR1);
+ low_mac = (uint16_t)*mac;
+ } else {
+ *mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
+ high_mac = (uint32_t)*mac;
+ mac++;
+ *mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
+ low_mac = (uint16_t)*mac;
+ }
if ((high_mac | low_mac) == 0) {
char *first_byte;
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 14/15] net/pfe: reduce driver initialization time
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
` (12 preceding siblings ...)
2022-01-03 10:01 ` [PATCH v3 13/15] net/enetc: add support for VFs nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-01-03 10:01 ` [PATCH v3 15/15] net/pfe: remove setting unused value nipun.gupta
2022-01-12 6:05 ` [PATCH v3 00/15] features and fixes on NXP eth devices Hemant Agrawal
15 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev
Cc: thomas, ferruh.yigit, hemant.agrawal, stephen, Gagandeep Singh,
Nipun Gupta
From: Gagandeep Singh <g.singh@nxp.com>
This patch reduces the delay in the device init.
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/net/pfe/pfe_hif.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/pfe/pfe_hif.c b/drivers/net/pfe/pfe_hif.c
index c4a7154ba7..8a10f10f56 100644
--- a/drivers/net/pfe/pfe_hif.c
+++ b/drivers/net/pfe/pfe_hif.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2021 NXP
*/
#include "pfe_logs.h"
@@ -766,7 +766,7 @@ pfe_hif_rx_idle(struct pfe_hif *hif)
if (rx_status & BDP_CSR_RX_DMA_ACTV)
send_dummy_pkt_to_hif();
- sleep(1);
+ rte_delay_ms(1);
} while (--hif_stop_loop);
if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* [PATCH v3 15/15] net/pfe: remove setting unused value
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
` (13 preceding siblings ...)
2022-01-03 10:01 ` [PATCH v3 14/15] net/pfe: reduce driver initialization time nipun.gupta
@ 2022-01-03 10:01 ` nipun.gupta
2022-01-12 6:05 ` [PATCH v3 00/15] features and fixes on NXP eth devices Hemant Agrawal
15 siblings, 0 replies; 68+ messages in thread
From: nipun.gupta @ 2022-01-03 10:01 UTC (permalink / raw)
To: dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stephen, Apeksha Gupta
From: Apeksha Gupta <apeksha.gupta@nxp.com>
remove setting link status where it is not being used
Signed-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>
---
drivers/net/pfe/pfe_ethdev.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
index 047010e15e..e5aaf5dcfd 100644
--- a/drivers/net/pfe/pfe_ethdev.c
+++ b/drivers/net/pfe/pfe_ethdev.c
@@ -587,8 +587,7 @@ pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus);
if (ret != 0) {
PFE_PMD_ERR("Unable to fetch link status (ioctl)\n");
- /* use dummy link value */
- link.link_status = 1;
+ return -1;
}
PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.\n",
lstatus, priv->id);
--
2.17.1
^ permalink raw reply [flat|nested] 68+ messages in thread
* Re: [PATCH v3 00/15] features and fixes on NXP eth devices
2022-01-03 10:01 ` [PATCH v3 00/15] " nipun.gupta
` (14 preceding siblings ...)
2022-01-03 10:01 ` [PATCH v3 15/15] net/pfe: remove setting unused value nipun.gupta
@ 2022-01-12 6:05 ` Hemant Agrawal
2022-01-20 15:26 ` Ferruh Yigit
15 siblings, 1 reply; 68+ messages in thread
From: Hemant Agrawal @ 2022-01-12 6:05 UTC (permalink / raw)
To: nipun.gupta, dev; +Cc: thomas, ferruh.yigit, hemant.agrawal, stephen
Series-
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
On 1/3/2022 3:31 PM, nipun.gupta@nxp.com wrote:
> From: Nipun Gupta <nipun.gupta@nxp.com>
>
> This series adds few features and important fixes on DPAA,
> PFE and ENETC devices.
>
> Features added:
> - level 2 support for shaping on DPAA2
> - loopback configuration for DPNI devices on DPAA2
> - Multiple TXQ's enqueue for ordered queues for performance
> - VFs support on ENETC
>
> Fixes:
> - fix unregistering interrupt handler on DPAA2
> - fix timestamping for IEEE1588 on DPAA1
>
> Changes in v2:
> - fix checkpatch errors
>
> Changes in v3:
> - remove unrequired PFE HW checksum patch
> - use predefined API for adding delay
> - use macro value for allocating mbuf in event
>
> Apeksha Gupta (1):
> net/pfe: remove setting unused value
>
> Gagandeep Singh (3):
> net/dpaa2: add support for level 2 in traffic management
> net/enetc: add support for VFs
> net/pfe: reduce driver initialization time
>
> Jun Yang (4):
> net/dpaa2: support multiple txqs en-queue for ordered
> net/dpaa2: secondary process handling for dpni
> bus/fslmc: add and scan dprc devices
> net/dpaa2: support recycle loopback port
>
> Nipun Gupta (4):
> bus/fslmc: update MC to 10.29
> bus/fslmc: use dmb oshst for synchronization before I/O
> net/dpaa: check status before configuring shared MAC
> net/dpaa: enable checksum for shared MAC interface
>
> Rohit Raj (1):
> net/dpaa2: warn user in case of high nb desc
>
> Vanshika Shukla (2):
> net/dpaa2: fix unregistering interrupt handler
> net/dpaa2: fix timestamping for IEEE1588
>
> doc/guides/nics/dpaa2.rst | 2 +-
> drivers/bus/dpaa/base/fman/fman_hw.c | 11 +
> drivers/bus/dpaa/include/fsl_fman.h | 2 +
> drivers/bus/dpaa/version.map | 1 +
> drivers/bus/fslmc/fslmc_bus.c | 15 +-
> drivers/bus/fslmc/fslmc_vfio.c | 18 +-
> drivers/bus/fslmc/mc/dprc.c | 129 ++++
> drivers/bus/fslmc/mc/fsl_dpmng.h | 2 +-
> drivers/bus/fslmc/mc/fsl_dprc.h | 46 ++
> drivers/bus/fslmc/mc/fsl_dprc_cmd.h | 48 ++
> drivers/bus/fslmc/meson.build | 4 +-
> drivers/bus/fslmc/portal/dpaa2_hw_dprc.c | 100 +++
> drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 15 +-
> drivers/bus/fslmc/qbman/include/compat.h | 4 +-
> drivers/bus/fslmc/rte_fslmc.h | 10 +-
> drivers/event/dpaa2/dpaa2_eventdev.c | 12 +-
> drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 23 +
> drivers/mempool/dpaa2/rte_dpaa2_mempool.h | 15 +
> drivers/mempool/dpaa2/version.map | 1 +
> drivers/net/dpaa/dpaa_ethdev.c | 17 +-
> drivers/net/dpaa2/base/dpaa2_hw_dpni.c | 5 +-
> drivers/net/dpaa2/dpaa2_ethdev.c | 117 +++-
> drivers/net/dpaa2/dpaa2_ethdev.h | 38 +-
> drivers/net/dpaa2/dpaa2_ptp.c | 8 +-
> drivers/net/dpaa2/dpaa2_recycle.c | 780 ++++++++++++++++++++++
> drivers/net/dpaa2/dpaa2_rxtx.c | 181 ++++-
> drivers/net/dpaa2/dpaa2_tm.c | 563 +++++++++++++---
> drivers/net/dpaa2/dpaa2_tm.h | 17 +-
> drivers/net/dpaa2/mc/dpdmux.c | 8 +
> drivers/net/dpaa2/mc/dpkg.c | 7 +-
> drivers/net/dpaa2/mc/dpni.c | 417 ++++++++----
> drivers/net/dpaa2/mc/fsl_dpdmux.h | 3 +
> drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h | 5 +-
> drivers/net/dpaa2/mc/fsl_dpni.h | 173 +++--
> drivers/net/dpaa2/mc/fsl_dpni_cmd.h | 137 ++--
> drivers/net/dpaa2/meson.build | 1 +
> drivers/net/dpaa2/version.map | 1 +
> drivers/net/enetc/enetc_ethdev.c | 25 +-
> drivers/net/pfe/pfe_ethdev.c | 3 +-
> drivers/net/pfe/pfe_hif.c | 4 +-
> 40 files changed, 2519 insertions(+), 449 deletions(-)
> create mode 100644 drivers/bus/fslmc/mc/dprc.c
> create mode 100644 drivers/bus/fslmc/mc/fsl_dprc.h
> create mode 100644 drivers/bus/fslmc/mc/fsl_dprc_cmd.h
> create mode 100644 drivers/bus/fslmc/portal/dpaa2_hw_dprc.c
> create mode 100644 drivers/net/dpaa2/dpaa2_recycle.c
>
^ permalink raw reply [flat|nested] 68+ messages in thread
* Re: [PATCH v3 00/15] features and fixes on NXP eth devices
2022-01-12 6:05 ` [PATCH v3 00/15] features and fixes on NXP eth devices Hemant Agrawal
@ 2022-01-20 15:26 ` Ferruh Yigit
0 siblings, 0 replies; 68+ messages in thread
From: Ferruh Yigit @ 2022-01-20 15:26 UTC (permalink / raw)
To: hemant.agrawal, nipun.gupta, dev; +Cc: thomas, stephen
On 1/12/2022 6:05 AM, Hemant Agrawal wrote:
>
> On 1/3/2022 3:31 PM, nipun.gupta@nxp.com wrote:
>> From: Nipun Gupta <nipun.gupta@nxp.com>
>>
>> This series adds few features and important fixes on DPAA,
>> PFE and ENETC devices.
>>
>> Features added:
>> - level 2 support for shaping on DPAA2
>> - loopback configuration for DPNI devices on DPAA2
>> - Multiple TXQ's enqueue for ordered queues for performance
>> - VFs support on ENETC
>>
>> Fixes:
>> - fix unregistering interrupt handler on DPAA2
>> - fix timestamping for IEEE1588 on DPAA1
>>
>> Changes in v2:
>> - fix checkpatch errors
>>
>> Changes in v3:
>> - remove unrequired PFE HW checksum patch
>> - use predefined API for adding delay
>> - use macro value for allocating mbuf in event
>>
>> Apeksha Gupta (1):
>> net/pfe: remove setting unused value
>>
>> Gagandeep Singh (3):
>> net/dpaa2: add support for level 2 in traffic management
>> net/enetc: add support for VFs
>> net/pfe: reduce driver initialization time
>>
>> Jun Yang (4):
>> net/dpaa2: support multiple txqs en-queue for ordered
>> net/dpaa2: secondary process handling for dpni
>> bus/fslmc: add and scan dprc devices
>> net/dpaa2: support recycle loopback port
>>
>> Nipun Gupta (4):
>> bus/fslmc: update MC to 10.29
>> bus/fslmc: use dmb oshst for synchronization before I/O
>> net/dpaa: check status before configuring shared MAC
>> net/dpaa: enable checksum for shared MAC interface
>>
>> Rohit Raj (1):
>> net/dpaa2: warn user in case of high nb desc
>>
>> Vanshika Shukla (2):
>> net/dpaa2: fix unregistering interrupt handler
>> net/dpaa2: fix timestamping for IEEE1588
>>
>
> Series-
>
> Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
>
Series applied to dpdk-next-net/main, thanks.
^ permalink raw reply [flat|nested] 68+ messages in thread