* [PATCH v2 01/70] net/ice/base: add netlist helper functions
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
@ 2022-08-15 7:30 ` Qi Zhang
2022-08-15 6:28 ` Yang, Qiming
2022-08-15 7:30 ` [PATCH v2 02/70] net/ice/base: get NVM CSS Header length from the CSS Header Qi Zhang
` (69 subsequent siblings)
70 siblings, 1 reply; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:30 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Michal Michalik
Add new functions to check in the netlist whether the HW has:
- Recovered Clock device,
- Clock Generation Unit,
- Clock Multiplexer,
- GPS generic device.
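These checks are thin wrappers around the new generic netlist walker.
As a rough sketch (equivalent to the GPS helper added to ice_ptp_hw.c
below), a presence check boils down to:

    /* true when a GPS generic device node is found in the netlist */
    bool ice_is_gps_present_e810t(struct ice_hw *hw)
    {
            return !ice_find_netlist_node(hw,
                                          ICE_AQC_LINK_TOPO_NODE_TYPE_GPS,
                                          ICE_ACQ_GET_LINK_TOPO_NODE_NR_GEN_GPS,
                                          NULL);
    }

ice_find_netlist_node() scans up to MAX_NETLIST_SIZE node indices of
the given type and returns ICE_SUCCESS only when a node with the
requested part number is found.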
Signed-off-by: Michal Michalik <michal.michalik@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_adminq_cmd.h | 54 +++++++++++
drivers/net/ice/base/ice_common.c | 130 +++++++++++++++++++++-----
drivers/net/ice/base/ice_common.h | 10 ++
drivers/net/ice/base/ice_ptp_hw.c | 37 +++++---
drivers/net/ice/base/ice_ptp_hw.h | 1 +
5 files changed, 195 insertions(+), 37 deletions(-)
diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index 253b971dfd..a3add411b8 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -1635,6 +1635,7 @@ struct ice_aqc_link_topo_params {
#define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6
#define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7
#define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_GPS 11
#define ICE_AQC_LINK_TOPO_NODE_CTX_S 4
#define ICE_AQC_LINK_TOPO_NODE_CTX_M \
(0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S)
@@ -1672,9 +1673,61 @@ struct ice_aqc_get_link_topo {
struct ice_aqc_link_topo_addr addr;
u8 node_part_num;
#define ICE_ACQ_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define ICE_ACQ_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48
u8 rsvd[9];
};
+/* Get Link Topology Pin (direct, 0x06E1) */
+struct ice_aqc_get_link_topo_pin {
+ struct ice_aqc_link_topo_addr addr;
+ u8 input_io_params;
+#define ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_S 0
+#define ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_M \
+ (0x1F << ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_S)
+#define ICE_AQC_LINK_TOPO_IO_FUNC_GPIO 0
+#define ICE_AQC_LINK_TOPO_IO_FUNC_RESET_N 1
+#define ICE_AQC_LINK_TOPO_IO_FUNC_INT_N 2
+#define ICE_AQC_LINK_TOPO_IO_FUNC_PRESENT_N 3
+#define ICE_AQC_LINK_TOPO_IO_FUNC_TX_DIS 4
+#define ICE_AQC_LINK_TOPO_IO_FUNC_MODSEL_N 5
+#define ICE_AQC_LINK_TOPO_IO_FUNC_LPMODE 6
+#define ICE_AQC_LINK_TOPO_IO_FUNC_TX_FAULT 7
+#define ICE_AQC_LINK_TOPO_IO_FUNC_RX_LOSS 8
+#define ICE_AQC_LINK_TOPO_IO_FUNC_RS0 9
+#define ICE_AQC_LINK_TOPO_IO_FUNC_RS1 10
+#define ICE_AQC_LINK_TOPO_IO_FUNC_EEPROM_WP 11
+/* 12 repeats intentionally due to two different uses depending on context */
+#define ICE_AQC_LINK_TOPO_IO_FUNC_LED 12
+#define ICE_AQC_LINK_TOPO_IO_FUNC_RED_LED 12
+#define ICE_AQC_LINK_TOPO_IO_FUNC_GREEN_LED 13
+#define ICE_AQC_LINK_TOPO_IO_FUNC_BLUE_LED 14
+#define ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S 5
+#define ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_M \
+ (0x7 << ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S)
+#define ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_GPIO 3
+/* Use ICE_AQC_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_params;
+#define ICE_AQC_LINK_TOPO_OUTPUT_IO_FUNC_S 0
+#define ICE_AQC_LINK_TOPO_OUTPUT_IO_FUNC_M \
+ (0x1F << \
+  ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_NUM_S)
+/* Use ICE_AQC_LINK_TOPO_IO_FUNC_* for the non-numerical options */
+#define ICE_AQC_LINK_TOPO_OUTPUT_IO_TYPE_S 5
+#define ICE_AQC_LINK_TOPO_OUTPUT_IO_TYPE_M \
+ (0x7 << ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S)
+/* Use ICE_AQC_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_flags;
+#define ICE_AQC_LINK_TOPO_OUTPUT_SPEED_S 0
+#define ICE_AQC_LINK_TOPO_OUTPUT_SPEED_M \
+ (0x7 << ICE_AQC_LINK_TOPO_OUTPUT_SPEED_S)
+#define ICE_AQC_LINK_TOPO_OUTPUT_INT_S 3
+#define ICE_AQC_LINK_TOPO_OUTPUT_INT_M \
+ (0x3 << ICE_AQC_LINK_TOPO_OUTPUT_INT_S)
+#define ICE_AQC_LINK_TOPO_OUTPUT_POLARITY BIT(5)
+#define ICE_AQC_LINK_TOPO_OUTPUT_VALUE BIT(6)
+#define ICE_AQC_LINK_TOPO_OUTPUT_DRIVEN BIT(7)
+ u8 rsvd[7];
+};
+
/* Read/Write I2C (direct, 0x06E2/0x06E3) */
struct ice_aqc_i2c {
struct ice_aqc_link_topo_addr topo_addr;
@@ -2936,6 +2989,7 @@ struct ice_aq_desc {
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
+ struct ice_aqc_get_link_topo_pin get_link_topo_pin;
struct ice_aqc_set_health_status_config
set_health_status_config;
struct ice_aqc_get_supported_health_status_codes
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index db87bacd97..edc24030ec 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -396,37 +396,103 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
}
/**
- * ice_aq_get_link_topo_handle - get link topology node return status
- * @pi: port information structure
- * @node_type: requested node type
- * @cd: pointer to command details structure or NULL
- *
- * Get link topology node return status for specified node type (0x06E0)
- *
- * Node type cage can be used to determine if cage is present. If AQC
- * returns error (ENOENT), then no cage present. If no cage present, then
- * connection type is backplane or BASE-T.
+ * ice_aq_get_netlist_node_pin
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo_pin AQ structure
+ * @node_handle: output node handle parameter if node found
*/
-static enum ice_status
-ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
- struct ice_sq_cd *cd)
+enum ice_status
+ice_aq_get_netlist_node_pin(struct ice_hw *hw,
+ struct ice_aqc_get_link_topo_pin *cmd,
+ u16 *node_handle)
{
- struct ice_aqc_get_link_topo *cmd;
struct ice_aq_desc desc;
- cmd = &desc.params.get_link_topo;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo_pin);
+ desc.params.get_link_topo_pin = *cmd;
+
+ if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
+ return ICE_ERR_NOT_SUPPORTED;
+
+ if (node_handle)
+ *node_handle =
+ LE16_TO_CPU(desc.params.get_link_topo_pin.addr.handle);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_aq_get_netlist_node
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ */
+enum ice_status
+ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ice_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
- cmd->addr.topo_params.node_type_ctx =
- (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
- ICE_AQC_LINK_TOPO_NODE_CTX_S);
+ if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
+ return ICE_ERR_NOT_SUPPORTED;
- /* set node type */
- cmd->addr.topo_params.node_type_ctx |=
- (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
+ if (node_handle)
+ *node_handle =
+ LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
- return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
+ return ICE_SUCCESS;
+}
+
+#define MAX_NETLIST_SIZE 10
+/**
+ * ice_find_netlist_node
+ * @hw: pointer to the hw struct
+ * @node_type_ctx: type of netlist node to look for
+ * @node_part_number: node part number to look for
+ * @node_handle: output parameter if node found - optional
+ *
+ * Find and return the node handle for a given node type and part number in the
+ * netlist. When found ICE_SUCCESS is returned, ICE_ERR_DOES_NOT_EXIST
+ * otherwise. If node_handle provided, it would be set to found node handle.
+ */
+enum ice_status
+ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
+ u16 *node_handle)
+{
+ struct ice_aqc_get_link_topo cmd;
+ u8 rec_node_part_number;
+ u16 rec_node_handle;
+ u8 idx;
+
+ for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
+ enum ice_status status;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.addr.topo_params.node_type_ctx =
+ (node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
+ cmd.addr.topo_params.index = idx;
+
+ status = ice_aq_get_netlist_node(hw, &cmd,
+ &rec_node_part_number,
+ &rec_node_handle);
+ if (status)
+ return status;
+
+ if (rec_node_part_number == node_part_number) {
+ if (node_handle)
+ *node_handle = rec_node_handle;
+ return ICE_SUCCESS;
+ }
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
}
/**
@@ -438,13 +504,27 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
*/
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.get_link_topo;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+
+ cmd->addr.topo_params.node_type_ctx =
+ (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
+ ICE_AQC_LINK_TOPO_NODE_CTX_S);
+
+ /* set node type */
+ cmd->addr.topo_params.node_type_ctx |=
+ (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
+ ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE);
+
/* Node type cage can be used to determine if cage is present. If AQC
* returns error (ENOENT), then no cage present. If no cage present then
* connection type is backplane or BASE-T.
*/
- return !ice_aq_get_link_topo_handle(pi,
- ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
- NULL);
+ return ice_aq_get_netlist_node(pi->hw, cmd, NULL, NULL);
}
/**
diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index a3cbf4fb05..1044a3088e 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -145,6 +145,16 @@ enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_get_netlist_node_pin(struct ice_hw *hw,
+ struct ice_aqc_get_link_topo_pin *cmd,
+ u16 *node_handle);
+enum ice_status
+ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle);
+enum ice_status
+ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
+ u16 *node_handle);
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap);
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index 7e797c9511..5b366c95c5 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -3095,10 +3095,10 @@ ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd,
static enum ice_status
ice_get_pca9575_handle(struct ice_hw *hw, __le16 *pca9575_handle)
{
- struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
+ struct ice_aqc_get_link_topo cmd;
+ u8 node_part_number, idx;
enum ice_status status;
- u8 idx;
+ u16 node_handle;
if (!hw || !pca9575_handle)
return ICE_ERR_PARAM;
@@ -3109,12 +3109,10 @@ ice_get_pca9575_handle(struct ice_hw *hw, __le16 *pca9575_handle)
return ICE_SUCCESS;
}
- /* If handle was not detected read it from the netlist */
- cmd = &desc.params.get_link_topo;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ memset(&cmd, 0, sizeof(cmd));
/* Set node type to GPIO controller */
- cmd->addr.topo_params.node_type_ctx =
+ cmd.addr.topo_params.node_type_ctx =
(ICE_AQC_LINK_TOPO_NODE_TYPE_M &
ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
@@ -3129,24 +3127,39 @@ ice_get_pca9575_handle(struct ice_hw *hw, __le16 *pca9575_handle)
else
return ICE_ERR_NOT_SUPPORTED;
- cmd->addr.topo_params.index = idx;
+ cmd.addr.topo_params.index = idx;
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
+ &node_handle);
if (status)
return ICE_ERR_NOT_SUPPORTED;
/* Verify if we found the right IO expander type */
- if (desc.params.get_link_topo.node_part_num !=
- ICE_ACQ_GET_LINK_TOPO_NODE_NR_PCA9575)
+ if (node_part_number != ICE_ACQ_GET_LINK_TOPO_NODE_NR_PCA9575)
return ICE_ERR_NOT_SUPPORTED;
/* If present save the handle and return it */
- hw->io_expander_handle = desc.params.get_link_topo.addr.handle;
+ hw->io_expander_handle = node_handle;
*pca9575_handle = hw->io_expander_handle;
return ICE_SUCCESS;
}
+/**
+ * ice_is_gps_present_e810t
+ * @hw: pointer to the hw struct
+ *
+ * Check if the GPS generic device is present in the netlist
+ */
+bool ice_is_gps_present_e810t(struct ice_hw *hw)
+{
+ if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS,
+ ICE_ACQ_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL))
+ return false;
+
+ return true;
+}
+
/**
* ice_read_e810t_pca9575_reg
* @hw: pointer to the hw struct
diff --git a/drivers/net/ice/base/ice_ptp_hw.h b/drivers/net/ice/base/ice_ptp_hw.h
index ee3366e83c..4f349593aa 100644
--- a/drivers/net/ice/base/ice_ptp_hw.h
+++ b/drivers/net/ice/base/ice_ptp_hw.h
@@ -221,6 +221,7 @@ enum ice_status ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port);
enum ice_status ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port);
/* E810 family functions */
+bool ice_is_gps_present_e810t(struct ice_hw *hw);
enum ice_status ice_ptp_init_phy_e810(struct ice_hw *hw);
enum ice_status
ice_read_e810t_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* RE: [PATCH v2 01/70] net/ice/base: add netlist helper functions
2022-08-15 7:30 ` [PATCH v2 01/70] net/ice/base: add netlist helper functions Qi Zhang
@ 2022-08-15 6:28 ` Yang, Qiming
0 siblings, 0 replies; 149+ messages in thread
From: Yang, Qiming @ 2022-08-15 6:28 UTC (permalink / raw)
To: Zhang, Qi Z; +Cc: dev, Michalik, Michal
Hi,
> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: Monday, August 15, 2022 3:31 PM
> To: Yang, Qiming <qiming.yang@intel.com>
> Cc: dev@dpdk.org; Zhang, Qi Z <qi.z.zhang@intel.com>; Michalik, Michal <michal.michalik@intel.com>
> Subject: [PATCH v2 01/70] net/ice/base: add netlist helper functions
>
> [...]
Patch 01~12
Acked-by: Qiming Yang <qiming.yang@intel.com>
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 02/70] net/ice/base: get NVM CSS Header length from the CSS Header
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
2022-08-15 7:30 ` [PATCH v2 01/70] net/ice/base: add netlist helper functions Qi Zhang
@ 2022-08-15 7:30 ` Qi Zhang
2022-08-15 7:30 ` [PATCH v2 03/70] net/ice/base: combine functions for VSI promisc Qi Zhang
` (68 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:30 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Paul Greenwalt
The CSS Header length is currently hard-coded as
ICE_CSS_HEADER_LENGTH. To support changes in the CSS Header length,
calculate it from the NVM CSS Header length field plus the
Authentication Header length instead.
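As a sketch of the resulting calculation (using the constants added to
ice_type.h below):

    /* CSS header length is stored in dwords at words 0x02/0x03 of the
     * CSS header; convert to words and add the Authentication header
     */
    hdr_len_dword = ((u32)hdr_len_h << 16) | hdr_len_l;
    hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;  /* words */

    /* the Shadow RAM copy then starts at hdr_len rounded up to 32 */
    offset = ROUND_UP(hdr_len, 32);

For example, an image whose CSS header reports 161 dwords yields
161 * 2 + 8 = 330 words, matching the fixed ICE_CSS_HEADER_LENGTH
value removed by this patch.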
Signed-off-by: Paul Greenwalt <paul.greenwalt@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_nvm.c | 61 +++++++++++++++++++++++++++++----
drivers/net/ice/base/ice_type.h | 12 +++----
2 files changed, 59 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ice/base/ice_nvm.c b/drivers/net/ice/base/ice_nvm.c
index 7860006206..ad2496e873 100644
--- a/drivers/net/ice/base/ice_nvm.c
+++ b/drivers/net/ice/base/ice_nvm.c
@@ -350,6 +350,42 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
return status;
}
+/**
+ * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header and add the Authentication
+ * header size, and then convert to words.
+ */
+static enum ice_status
+ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ enum ice_status status;
+ u32 hdr_len_dword;
+
+ status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (status)
+ return status;
+
+ status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (status)
+ return status;
+
+ /* CSS header length is in DWORD, so convert to words and add
+ * authentication header size
+ */
+ hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
+ *hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;
+
+ return ICE_SUCCESS;
+}
+
/**
* ice_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
* @hw: pointer to the HW structure
@@ -363,7 +399,16 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
static enum ice_status
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
+ enum ice_status status;
+ u32 hdr_len;
+
+ status = ice_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (status)
+ return status;
+
+ hdr_len = ROUND_UP(hdr_len, 32);
+
+ return ice_read_nvm_module(hw, bank, hdr_len + offset, data);
}
/**
@@ -633,22 +678,26 @@ enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info
*/
static enum ice_status ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
{
+ u32 orom_size_word = hw->flash.banks.orom_size / 2;
enum ice_status status;
u16 srev_l, srev_h;
u32 css_start;
+ u32 hdr_len;
- if (hw->flash.banks.orom_size < ICE_NVM_OROM_TRAILER_LENGTH) {
+ status = ice_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (status)
+ return status;
+
+ if (orom_size_word < hdr_len) {
ice_debug(hw, ICE_DBG_NVM, "Unexpected Option ROM Size of %u\n",
hw->flash.banks.orom_size);
return ICE_ERR_CFG;
}
/* calculate how far into the Option ROM the CSS header starts. Note
- * that ice_read_orom_module takes a word offset so we need to
- * divide by 2 here.
+ * that ice_read_orom_module takes a word offset
*/
- css_start = (hw->flash.banks.orom_size - ICE_NVM_OROM_TRAILER_LENGTH) / 2;
-
+ css_start = orom_size_word - hdr_len;
status = ice_read_orom_module(hw, bank, css_start + ICE_NVM_CSS_SREV_L, &srev_l);
if (status)
return status;
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index d81984633a..d4d0cab089 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -1419,17 +1419,13 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_SR_POR_REGISTERS_AUTOLOAD_PTR 0x118
/* CSS Header words */
+#define ICE_NVM_CSS_HDR_LEN_L 0x02
+#define ICE_NVM_CSS_HDR_LEN_H 0x03
#define ICE_NVM_CSS_SREV_L 0x14
#define ICE_NVM_CSS_SREV_H 0x15
-/* Length of CSS header section in words */
-#define ICE_CSS_HEADER_LENGTH 330
-
-/* Offset of Shadow RAM copy in the NVM bank area. */
-#define ICE_NVM_SR_COPY_WORD_OFFSET ROUND_UP(ICE_CSS_HEADER_LENGTH, 32)
-
-/* Size in bytes of Option ROM trailer */
-#define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH)
+/* Length of Authentication header section in words */
+#define ICE_NVM_AUTH_HEADER_LEN 0x08
/* The Link Topology Netlist section is stored as a series of words. It is
* stored in the NVM as a TLV, with the first two words containing the type
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 03/70] net/ice/base: combine functions for VSI promisc
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
2022-08-15 7:30 ` [PATCH v2 01/70] net/ice/base: add netlist helper functions Qi Zhang
2022-08-15 7:30 ` [PATCH v2 02/70] net/ice/base: get NVM CSS Header length from the CSS Header Qi Zhang
@ 2022-08-15 7:30 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 04/70] net/ice/base: make function names more generic Qi Zhang
` (67 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:30 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Wiktor Pilarczyk
Remove _ice_get_vsi_vlan_promisc because its implementation is almost
identical to _ice_get_vsi_promisc, which now also handles the
ice_get_vsi_vlan_promisc use case.
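The external API is unchanged; both exported wrappers now funnel into
the single internal helper and differ only in the lookup type they
pass down. A caller sketch:

    u8 promisc_mask;
    u16 vid;

    /* regular promiscuous lookup -> ICE_SW_LKUP_PROMISC */
    status = ice_get_vsi_promisc(hw, vsi_handle, &promisc_mask, &vid);

    /* VLAN promiscuous lookup -> ICE_SW_LKUP_PROMISC_VLAN */
    status = ice_get_vsi_vlan_promisc(hw, vsi_handle, &promisc_mask, &vid);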
Signed-off-by: Wiktor Pilarczyk <wiktor.pilarczyk@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_switch.c | 58 ++++++-------------------------
1 file changed, 11 insertions(+), 47 deletions(-)
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index c0df3a1815..513623a0a4 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -5825,22 +5825,25 @@ static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
* @promisc_mask: pointer to mask to be filled in
* @vid: VLAN ID of promisc VLAN VSI
* @sw: pointer to switch info struct for which function add rule
+ * @lkup: switch rule filter lookup type
*/
static enum ice_status
_ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
- u16 *vid, struct ice_switch_info *sw)
+ u16 *vid, struct ice_switch_info *sw,
+ enum ice_sw_lkup_type lkup)
{
struct ice_fltr_mgmt_list_entry *itr;
struct LIST_HEAD_TYPE *rule_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
- if (!ice_is_vsi_valid(hw, vsi_handle))
+ if (!ice_is_vsi_valid(hw, vsi_handle) ||
+ (lkup != ICE_SW_LKUP_PROMISC && lkup != ICE_SW_LKUP_PROMISC_VLAN))
return ICE_ERR_PARAM;
*vid = 0;
*promisc_mask = 0;
- rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
- rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
+ rule_head = &sw->recp_list[lkup].filt_rules;
+ rule_lock = &sw->recp_list[lkup].filt_rule_lock;
ice_acquire_lock(rule_lock);
LIST_FOR_EACH_ENTRY(itr, rule_head,
@@ -5870,47 +5873,7 @@ ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid)
{
return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
- vid, hw->switch_info);
-}
-
-/**
- * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
- * @hw: pointer to the hardware structure
- * @vsi_handle: VSI handle to retrieve info from
- * @promisc_mask: pointer to mask to be filled in
- * @vid: VLAN ID of promisc VLAN VSI
- * @sw: pointer to switch info struct for which function add rule
- */
-static enum ice_status
-_ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
- u16 *vid, struct ice_switch_info *sw)
-{
- struct ice_fltr_mgmt_list_entry *itr;
- struct LIST_HEAD_TYPE *rule_head;
- struct ice_lock *rule_lock; /* Lock to protect filter rule list */
-
- if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
-
- *vid = 0;
- *promisc_mask = 0;
- rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
- rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
-
- ice_acquire_lock(rule_lock);
- LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
- list_entry) {
- /* Continue if this filter doesn't apply to this VSI or the
- * VSI ID is not in the VSI map for this filter
- */
- if (!ice_vsi_uses_fltr(itr, vsi_handle))
- continue;
-
- *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
- }
- ice_release_lock(rule_lock);
-
- return ICE_SUCCESS;
+ vid, hw->switch_info, ICE_SW_LKUP_PROMISC);
}
/**
@@ -5924,8 +5887,9 @@ enum ice_status
ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid)
{
- return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
- vid, hw->switch_info);
+ return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
+ vid, hw->switch_info,
+ ICE_SW_LKUP_PROMISC_VLAN);
}
/**
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 04/70] net/ice/base: make function names more generic
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (2 preceding siblings ...)
2022-08-15 7:30 ` [PATCH v2 03/70] net/ice/base: combine functions for VSI promisc Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 05/70] net/ice/base: fix incorrect division during E822 PTP init Qi Zhang
` (66 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Arkadiusz Kubalewski
Previously "e810t" was part of few function names.
In the future it will require to add similar functions
for different NIC types.
Make "NIC type" a suffix of the function name.
Signed-off-by: Arkadiusz Kubalewski <arkadiusz.kubalewski@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_ptp_hw.c | 12 ++++++------
drivers/net/ice/base/ice_ptp_hw.h | 6 +++---
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index 5b366c95c5..632a3f5bae 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -3161,7 +3161,7 @@ bool ice_is_gps_present_e810t(struct ice_hw *hw)
}
/**
- * ice_read_e810t_pca9575_reg
+ * ice_read_pca9575_reg_e810t
* @hw: pointer to the hw struct
* @offset: GPIO controller register offset
* @data: pointer to data to be read from the GPIO controller
@@ -3169,7 +3169,7 @@ bool ice_is_gps_present_e810t(struct ice_hw *hw)
* Read the register from the GPIO controller
*/
enum ice_status
-ice_read_e810t_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
+ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
{
struct ice_aqc_link_topo_addr link_topo;
enum ice_status status;
@@ -3191,7 +3191,7 @@ ice_read_e810t_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
}
/**
- * ice_write_e810t_pca9575_reg
+ * ice_write_pca9575_reg_e810t
* @hw: pointer to the hw struct
* @offset: GPIO controller register offset
* @data: data to be written to the GPIO controller
@@ -3199,7 +3199,7 @@ ice_read_e810t_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
* Write the data to the GPIO controller register
*/
enum ice_status
-ice_write_e810t_pca9575_reg(struct ice_hw *hw, u8 offset, u8 data)
+ice_write_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 data)
{
struct ice_aqc_link_topo_addr link_topo;
enum ice_status status;
@@ -3283,12 +3283,12 @@ enum ice_status ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
}
/**
- * ice_e810t_is_pca9575_present
+ * ice_is_pca9575_present
* @hw: pointer to the hw struct
*
* Check if the SW IO expander is present in the netlist
*/
-bool ice_e810t_is_pca9575_present(struct ice_hw *hw)
+bool ice_is_pca9575_present(struct ice_hw *hw)
{
enum ice_status status;
__le16 handle = 0;
diff --git a/drivers/net/ice/base/ice_ptp_hw.h b/drivers/net/ice/base/ice_ptp_hw.h
index 4f349593aa..d27815fd94 100644
--- a/drivers/net/ice/base/ice_ptp_hw.h
+++ b/drivers/net/ice/base/ice_ptp_hw.h
@@ -224,12 +224,12 @@ enum ice_status ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port);
bool ice_is_gps_present_e810t(struct ice_hw *hw);
enum ice_status ice_ptp_init_phy_e810(struct ice_hw *hw);
enum ice_status
-ice_read_e810t_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
+ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
enum ice_status
-ice_write_e810t_pca9575_reg(struct ice_hw *hw, u8 offset, u8 data);
+ice_write_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 data);
enum ice_status ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
enum ice_status ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
-bool ice_e810t_is_pca9575_present(struct ice_hw *hw);
+bool ice_is_pca9575_present(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 05/70] net/ice/base: fix incorrect division during E822 PTP init
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (3 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 04/70] net/ice/base: make function names more generic Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 06/70] net/ice/base: added auto drop blocking packets functionality Qi Zhang
` (65 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, stable, Jacob Keller
When initializing the device hardware for PTP, the E822 devices
require a number of values to be calculated and programmed to the
hardware. These values are calculated using unsigned 64-bit division.

The DIV_64BIT macro currently translates into a specific Linux
function that triggers a *signed* division. This produces incorrect
results when operating on a dividend larger than an s64: the division
effectively overflows and results in totally unexpected behavior.

In this case, the UIX values for 10Gb/40Gb link speeds are calculated
incorrectly. This ultimately cascades into a failure of the Tx
timestamps; the reported Tx timestamps become wildly inaccurate and no
longer represent nominal time.

The root cause of this bug is the assumption that DIV_64BIT can
correctly handle both signed and unsigned division. The macro only
exists because the Linux kernel compilation target does not provide
native 64-bit division ops and requires explicit use of kernel
functions that perform either signed or unsigned division.

To solve this correctly, introduce new functions, DIV_U64 and DIV_S64,
which are specifically intended for unsigned and signed division
respectively. To help catch issues, implement them as static inline
functions so that we get strict type checking.
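The failure mode can be seen with a small standalone snippet
(illustration only, not driver code): a dividend above S64_MAX is
reinterpreted as a negative value by a signed divide and the quotient
becomes meaningless.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* a dividend larger than S64_MAX, as described above */
            uint64_t dividend = 0xF000000000000000ULL;

            uint64_t good = dividend / 125;           /* unsigned divide */
            int64_t bad = (int64_t)dividend / 125;    /* signed divide */

            /* the signed divide sees a negative dividend and returns a
             * negative, meaningless quotient
             */
            printf("unsigned: %llu  signed: %lld\n",
                   (unsigned long long)good, (long long)bad);
            return 0;
    }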
Fixes: 97f4f78bbd9f ("net/ice/base: add functions for device clock control")
Cc: stable@dpdk.org
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_ptp_hw.c | 56 +++++++++++++++----------------
drivers/net/ice/base/ice_sched.c | 24 ++++++-------
drivers/net/ice/base/ice_type.h | 30 +++++++++++++++--
3 files changed, 68 insertions(+), 42 deletions(-)
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index 632a3f5bae..76119364e4 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -1634,7 +1634,7 @@ static enum ice_status ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
#define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
/* Program the 10Gb/40Gb conversion ratio */
- uix = DIV_64BIT(tu_per_sec * LINE_UI_10G_40G, 390625000);
+ uix = DIV_U64(tu_per_sec * LINE_UI_10G_40G, 390625000);
status = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
uix);
@@ -1645,7 +1645,7 @@ static enum ice_status ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
}
/* Program the 25Gb/100Gb conversion ratio */
- uix = DIV_64BIT(tu_per_sec * LINE_UI_25G_100G, 390625000);
+ uix = DIV_U64(tu_per_sec * LINE_UI_25G_100G, 390625000);
status = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
uix);
@@ -1727,8 +1727,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
/* P_REG_PAR_TX_TUS */
if (e822_vernier[link_spd].tx_par_clk)
- phy_tus = DIV_64BIT(tu_per_sec,
- e822_vernier[link_spd].tx_par_clk);
+ phy_tus = DIV_U64(tu_per_sec,
+ e822_vernier[link_spd].tx_par_clk);
else
phy_tus = 0;
@@ -1739,8 +1739,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
/* P_REG_PAR_RX_TUS */
if (e822_vernier[link_spd].rx_par_clk)
- phy_tus = DIV_64BIT(tu_per_sec,
- e822_vernier[link_spd].rx_par_clk);
+ phy_tus = DIV_U64(tu_per_sec,
+ e822_vernier[link_spd].rx_par_clk);
else
phy_tus = 0;
@@ -1751,8 +1751,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
/* P_REG_PCS_TX_TUS */
if (e822_vernier[link_spd].tx_pcs_clk)
- phy_tus = DIV_64BIT(tu_per_sec,
- e822_vernier[link_spd].tx_pcs_clk);
+ phy_tus = DIV_U64(tu_per_sec,
+ e822_vernier[link_spd].tx_pcs_clk);
else
phy_tus = 0;
@@ -1763,8 +1763,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
/* P_REG_PCS_RX_TUS */
if (e822_vernier[link_spd].rx_pcs_clk)
- phy_tus = DIV_64BIT(tu_per_sec,
- e822_vernier[link_spd].rx_pcs_clk);
+ phy_tus = DIV_U64(tu_per_sec,
+ e822_vernier[link_spd].rx_pcs_clk);
else
phy_tus = 0;
@@ -1775,8 +1775,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
/* P_REG_DESK_PAR_TX_TUS */
if (e822_vernier[link_spd].tx_desk_rsgb_par)
- phy_tus = DIV_64BIT(tu_per_sec,
- e822_vernier[link_spd].tx_desk_rsgb_par);
+ phy_tus = DIV_U64(tu_per_sec,
+ e822_vernier[link_spd].tx_desk_rsgb_par);
else
phy_tus = 0;
@@ -1787,8 +1787,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
/* P_REG_DESK_PAR_RX_TUS */
if (e822_vernier[link_spd].rx_desk_rsgb_par)
- phy_tus = DIV_64BIT(tu_per_sec,
- e822_vernier[link_spd].rx_desk_rsgb_par);
+ phy_tus = DIV_U64(tu_per_sec,
+ e822_vernier[link_spd].rx_desk_rsgb_par);
else
phy_tus = 0;
@@ -1799,8 +1799,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
/* P_REG_DESK_PCS_TX_TUS */
if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
- phy_tus = DIV_64BIT(tu_per_sec,
- e822_vernier[link_spd].tx_desk_rsgb_pcs);
+ phy_tus = DIV_U64(tu_per_sec,
+ e822_vernier[link_spd].tx_desk_rsgb_pcs);
else
phy_tus = 0;
@@ -1811,8 +1811,8 @@ static enum ice_status ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
/* P_REG_DESK_PCS_RX_TUS */
if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
- phy_tus = DIV_64BIT(tu_per_sec,
- e822_vernier[link_spd].rx_desk_rsgb_pcs);
+ phy_tus = DIV_U64(tu_per_sec,
+ e822_vernier[link_spd].rx_desk_rsgb_pcs);
else
phy_tus = 0;
@@ -1844,9 +1844,9 @@ ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
* overflows 64 bit integer arithmetic, so break it up into two
* divisions by 1e4 first then by 1e7.
*/
- fixed_offset = DIV_64BIT(tu_per_sec, 10000);
+ fixed_offset = DIV_U64(tu_per_sec, 10000);
fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
- fixed_offset = DIV_64BIT(fixed_offset, 10000000);
+ fixed_offset = DIV_U64(fixed_offset, 10000000);
return fixed_offset;
}
@@ -2074,9 +2074,9 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
* divide by 125, and then handle remaining divisor based on the link
* speed pmd_adj_divisor value.
*/
- adj = DIV_64BIT(tu_per_sec, 125);
+ adj = DIV_U64(tu_per_sec, 125);
adj *= mult;
- adj = DIV_64BIT(adj, pmd_adj_divisor);
+ adj = DIV_U64(adj, pmd_adj_divisor);
/* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
* cycle count is necessary.
@@ -2097,9 +2097,9 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
if (rx_cycle) {
mult = (4 - rx_cycle) * 40;
- cycle_adj = DIV_64BIT(tu_per_sec, 125);
+ cycle_adj = DIV_U64(tu_per_sec, 125);
cycle_adj *= mult;
- cycle_adj = DIV_64BIT(cycle_adj, pmd_adj_divisor);
+ cycle_adj = DIV_U64(cycle_adj, pmd_adj_divisor);
adj += cycle_adj;
}
@@ -2119,9 +2119,9 @@ ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
if (rx_cycle) {
mult = rx_cycle * 40;
- cycle_adj = DIV_64BIT(tu_per_sec, 125);
+ cycle_adj = DIV_U64(tu_per_sec, 125);
cycle_adj *= mult;
- cycle_adj = DIV_64BIT(cycle_adj, pmd_adj_divisor);
+ cycle_adj = DIV_U64(cycle_adj, pmd_adj_divisor);
adj += cycle_adj;
}
@@ -2157,9 +2157,9 @@ ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
* overflows 64 bit integer arithmetic, so break it up into two
* divisions by 1e4 first then by 1e7.
*/
- fixed_offset = DIV_64BIT(tu_per_sec, 10000);
+ fixed_offset = DIV_U64(tu_per_sec, 10000);
fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
- fixed_offset = DIV_64BIT(fixed_offset, 10000000);
+ fixed_offset = DIV_U64(fixed_offset, 10000000);
return fixed_offset;
}
diff --git a/drivers/net/ice/base/ice_sched.c b/drivers/net/ice/base/ice_sched.c
index 1b060d3567..71b5677f43 100644
--- a/drivers/net/ice/base/ice_sched.c
+++ b/drivers/net/ice/base/ice_sched.c
@@ -3916,8 +3916,8 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
u16 wakeup = 0;
/* Get the wakeup integer value */
- bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE);
- wakeup_int = DIV_64BIT(hw->psm_clk_freq, bytes_per_sec);
+ bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE);
+ wakeup_int = DIV_S64(hw->psm_clk_freq, bytes_per_sec);
if (wakeup_int > 63) {
wakeup = (u16)((1 << 15) | wakeup_int);
} else {
@@ -3925,18 +3925,18 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
* Convert Integer value to a constant multiplier
*/
wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
- wakeup_a = DIV_64BIT((s64)ICE_RL_PROF_MULTIPLIER *
- hw->psm_clk_freq, bytes_per_sec);
+ wakeup_a = DIV_S64((s64)ICE_RL_PROF_MULTIPLIER *
+ hw->psm_clk_freq, bytes_per_sec);
/* Get Fraction value */
wakeup_f = wakeup_a - wakeup_b;
/* Round up the Fractional value via Ceil(Fractional value) */
- if (wakeup_f > DIV_64BIT(ICE_RL_PROF_MULTIPLIER, 2))
+ if (wakeup_f > DIV_S64(ICE_RL_PROF_MULTIPLIER, 2))
wakeup_f += 1;
- wakeup_f_int = (s32)DIV_64BIT(wakeup_f * ICE_RL_PROF_FRACTION,
- ICE_RL_PROF_MULTIPLIER);
+ wakeup_f_int = (s32)DIV_S64(wakeup_f * ICE_RL_PROF_FRACTION,
+ ICE_RL_PROF_MULTIPLIER);
wakeup |= (u16)(wakeup_int << 9);
wakeup |= (u16)(0x1ff & wakeup_f_int);
}
@@ -3968,20 +3968,20 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
return status;
/* Bytes per second from Kbps */
- bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE);
+ bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE);
/* encode is 6 bits but really useful are 5 bits */
for (i = 0; i < 64; i++) {
u64 pow_result = BIT_ULL(i);
- ts_rate = DIV_64BIT((s64)hw->psm_clk_freq,
- pow_result * ICE_RL_PROF_TS_MULTIPLIER);
+ ts_rate = DIV_S64((s64)hw->psm_clk_freq,
+ pow_result * ICE_RL_PROF_TS_MULTIPLIER);
if (ts_rate <= 0)
continue;
/* Multiplier value */
- mv_tmp = DIV_64BIT(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
- ts_rate);
+ mv_tmp = DIV_S64(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
+ ts_rate);
/* Round to the nearest ICE_RL_PROF_MULTIPLIER */
mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index d4d0cab089..3da3de38af 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -87,11 +87,37 @@ static inline bool ice_is_tc_ena(ice_bitmap_t bitmap, u8 tc)
return ice_is_bit_set(&bitmap, tc);
}
-#define DIV_64BIT(n, d) ((n) / (d))
+/**
+ * DIV_S64 - Divide signed 64-bit value with signed 64-bit divisor
+ * @dividend: value to divide
+ * @divisor: value to divide by
+ *
+ * Use DIV_S64 for any 64-bit divide which operates on signed 64-bit dividends.
+ * Do not use this for unsigned 64-bit dividends as it will not produce
+ * correct results if the dividend is larger than S64_MAX.
+ */
+static inline s64 DIV_S64(s64 dividend, s64 divisor)
+{
+ return dividend / divisor;
+}
+
+/**
+ * DIV_U64 - Divide unsigned 64-bit value by unsigned 64-bit divisor
+ * @dividend: value to divide
+ * @divisor: value to divide by
+ *
+ * Use DIV_U64 for any 64-bit divide which operates on unsigned 64-bit
+ * dividends. Do not use this for signed 64-bit dividends as it will not
+ * handle negative values correctly.
+ */
+static inline u64 DIV_U64(u64 dividend, u64 divisor)
+{
+ return dividend / divisor;
+}
static inline u64 round_up_64bit(u64 a, u32 b)
{
- return DIV_64BIT(((a) + (b) / 2), (b));
+ return DIV_U64(((a) + (b) / 2), (b));
}
static inline u32 ice_round_to_num(u32 N, u32 R)
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 06/70] net/ice/base: added auto drop blocking packets functionality
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (4 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 05/70] net/ice/base: fix incorrect division during E822 PTP init Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 07/70] net/ice/base: fix 100M speed Qi Zhang
` (64 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Mateusz Rusinski
Extend the ice_aq_set_mac_cfg() function to add support for telling
the HW to automatically drop packets when a TC queue is blocked.
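A caller sketch with the new parameter (the flag is only sent to the
firmware when ice_is_fw_auto_drop_supported() reports the feature):

    /* keep jumbo frame support and opt in to dropping packets that
     * would block a TC queue
     */
    status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, true, NULL);

Existing callers that do not want the behavior simply pass false, as
done for ice_init_hw() and ice_dev_start() in this patch.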
Signed-off-by: Mateusz Rusinski <mateusz.rusinski@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_common.c | 22 ++++++++++++++++++++--
drivers/net/ice/base/ice_common.h | 5 ++++-
drivers/net/ice/base/ice_type.h | 6 ++++++
drivers/net/ice/ice_ethdev.c | 2 +-
4 files changed, 31 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index edc24030ec..f9640d9403 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -788,12 +788,14 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
* ice_aq_set_mac_cfg
* @hw: pointer to the HW struct
* @max_frame_size: Maximum Frame Size to be supported
+ * @auto_drop: Tell HW to drop packets if TC queue is blocked
* @cd: pointer to command details structure or NULL
*
* Set MAC configuration (0x0603)
*/
enum ice_status
-ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
+ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
+ struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_cfg *cmd;
struct ice_aq_desc desc;
@@ -807,6 +809,8 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
+ if (ice_is_fw_auto_drop_supported(hw) && auto_drop)
+ cmd->drop_opts |= ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS;
ice_fill_tx_timer_and_fc_thresh(hw, cmd);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
@@ -1106,7 +1110,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_fltr_mgmt_struct;
/* enable jumbo frame support at MAC level */
- status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
+ status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
+ NULL);
if (status)
goto err_unroll_fltr_mgmt_struct;
@@ -5921,3 +5926,16 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
}
return false;
}
+/**
+ * ice_is_fw_auto_drop_supported
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the firmware supports auto drop feature
+ */
+bool ice_is_fw_auto_drop_supported(struct ice_hw *hw)
+{
+ if (hw->api_maj_ver >= ICE_FW_API_AUTO_DROP_MAJ &&
+ hw->api_min_ver >= ICE_FW_API_AUTO_DROP_MIN)
+ return true;
+ return false;
+}
diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index 1044a3088e..1051cc1176 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -191,7 +191,8 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
-ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd);
+ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
+ struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd);
@@ -289,4 +290,6 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
struct ice_sq_cd *cd);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
+/* AQ API version for FW auto drop reports */
+bool ice_is_fw_auto_drop_supported(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index 3da3de38af..15b12bfc8d 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -1539,5 +1539,11 @@ struct ice_aq_get_set_rss_lut_params {
/* AQ API version for report default configuration */
#define ICE_FW_API_REPORT_DFLT_CFG_MAJ 1
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
+
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
+/* AQ API version for FW auto drop reports */
+#define ICE_FW_API_AUTO_DROP_MAJ 1
+#define ICE_FW_API_AUTO_DROP_MIN 4
+
+
#endif /* _ICE_TYPE_H_ */
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 587b01cf23..2e522376e3 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -3662,7 +3662,7 @@ ice_dev_start(struct rte_eth_dev *dev)
ICE_FRAME_SIZE_MAX;
/* Set the max frame size to HW*/
- ice_aq_set_mac_cfg(hw, max_frame_size, NULL);
+ ice_aq_set_mac_cfg(hw, max_frame_size, false, NULL);
if (ad->devargs.pps_out_ena) {
ret = ice_pps_out_cfg(hw, pin_idx, timer);
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
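A minimal usage sketch of the interface extended by the patch above, illustrative only: the wrapper function below is hypothetical, while ice_aq_set_mac_cfg(), ice_is_fw_auto_drop_supported() and ICE_AQ_SET_MAC_FRAME_SIZE_MAX come from the code in the diff.

	/* Hypothetical caller, not part of the patch: request auto drop of
	 * packets on blocked TC queues. The drop option is only applied by
	 * ice_aq_set_mac_cfg() when ice_is_fw_auto_drop_supported() reports
	 * an FW API of at least 1.4, so passing true is always safe.
	 */
	static enum ice_status
	ice_example_enable_auto_drop(struct ice_hw *hw)
	{
		return ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX,
					  true, NULL);
	}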
* [PATCH v2 07/70] net/ice/base: fix 100M speed
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (5 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 06/70] net/ice/base: added auto drop blocking packets functionality Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 08/70] net/ice/base: support VXLAN and GRE for RSS Qi Zhang
` (63 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, stable, Anirudh Venkataramanan
A couple of 10GBase-T devices don't support advertising 100M
speed. For these devices, ice_is_100m_speed_supported should
return false. Meanwhile, add a device that does support 100M speed.
Fixes: 486d29fda54c ("net/ice/base: add dedicate MAC type for E810")
Cc: stable@dpdk.org
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_common.c | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index f9640d9403..e22600c46d 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -3113,12 +3113,10 @@ ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
bool ice_is_100m_speed_supported(struct ice_hw *hw)
{
switch (hw->device_id) {
- case ICE_DEV_ID_E822C_10G_BASE_T:
case ICE_DEV_ID_E822C_SGMII:
- case ICE_DEV_ID_E822L_10G_BASE_T:
case ICE_DEV_ID_E822L_SGMII:
- case ICE_DEV_ID_E823L_10G_BASE_T:
case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823C_SGMII:
return true;
default:
return false;
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
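For context, a hedged sketch of how the helper is meant to be consumed; the wrapper, the phy_caps variable and the ICE_AQ_LINK_SPEED_100MB bit are assumptions made for illustration, only ice_is_100m_speed_supported() comes from the change above.

	/* Illustrative only: drop 100M from the advertised speed bitmap on
	 * devices that cannot support it, keep it otherwise. */
	static u16 ice_example_trim_speeds(struct ice_hw *hw, u16 phy_caps)
	{
		if (!ice_is_100m_speed_supported(hw))
			phy_caps &= ~ICE_AQ_LINK_SPEED_100MB;
		return phy_caps;
	}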
* [PATCH v2 08/70] net/ice/base: support VXLAN and GRE for RSS
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (6 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 07/70] net/ice/base: fix 100M speed Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 09/70] net/ice/base: fix DSCP PFC TLV creation Qi Zhang
` (62 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Jie Wang
Add RSS on the inner headers of VXLAN tunnel packets.
Add packet types for packets with an outer IPv4/IPv6 header to
support GRE and VXLAN tunnel packets.
The following rules can use the new packet types:
- eth / ipv4(6) / udp / vxlan / ipv4(6)
- eth / ipv4(6) / udp / vxlan / ipv4(6) / tcp
- eth / ipv4(6) / udp / vxlan / ipv4(6) / udp
- eth / ipv4(6) / gre / ipv4(6)
- eth / ipv4(6) / gre / ipv4(6) / tcp
- eth / ipv4(6) / gre / ipv4(6) / udp
Signed-off-by: Jie Wang <jie1x.wang@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_flow.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
index d7eecc0d54..bdb584c7f5 100644
--- a/drivers/net/ice/base/ice_flow.c
+++ b/drivers/net/ice/base/ice_flow.c
@@ -262,7 +262,7 @@ static const u32 ice_ptypes_macvlan_il[] = {
* does NOT include IPV4 other PTYPEs
*/
static const u32 ice_ptypes_ipv4_ofos[] = {
- 0x1D800000, 0x24000800, 0x00000000, 0x00000000,
+ 0x1D800000, 0xBFBF7800, 0x000001DF, 0x00000000,
0x00000000, 0x00000155, 0x00000000, 0x00000000,
0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
0x00001500, 0x00000000, 0x00000000, 0x00000000,
@@ -316,8 +316,8 @@ static const u32 ice_ptypes_ipv6_ofos[] = {
* includes IPV6 other PTYPEs
*/
static const u32 ice_ptypes_ipv6_ofos_all[] = {
- 0x00000000, 0x00000000, 0x76000000, 0x1EFDE000,
- 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x76000000, 0xFEFDE000,
+ 0x0000077E, 0x000002AA, 0x00000000, 0x00000000,
0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
0xFC002000, 0x0000003F, 0xBC000000, 0x0002FBEF,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -985,8 +985,9 @@ struct ice_flow_prof_params {
ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
ICE_FLOW_SEG_HDR_NAT_T_ESP | ICE_FLOW_SEG_HDR_GTPU_NON_IP | \
+ ICE_FLOW_SEG_HDR_VXLAN | ICE_FLOW_SEG_HDR_GRE | \
ICE_FLOW_SEG_HDR_ECPRI_TP0 | ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0 | \
- ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP | ICE_FLOW_SEG_HDR_GRE)
+ ICE_FLOW_SEG_HDR_L2TPV2 | ICE_FLOW_SEG_HDR_PPP)
#define ICE_FLOW_SEG_HDRS_L2_MASK \
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
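A short illustrative helper (not part of the driver) showing how the ice_ptypes_* tables touched above are laid out, assuming the usual one-bit-per-PTYPE packing into 32-bit words; it is only meant to make the bitmap edits easier to read.

	/* Hypothetical helper: test whether packet type index 'ptype' is set
	 * in one of the ice_ptypes_* tables (arrays of 32-bit words, one bit
	 * per PTYPE). */
	static inline bool ice_example_ptype_set(const u32 *ptypes, u16 ptype)
	{
		return (ptypes[ptype / 32] >> (ptype % 32)) & 1u;
	}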
* [PATCH v2 09/70] net/ice/base: fix DSCP PFC TLV creation
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (7 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 08/70] net/ice/base: support VXLAN and GRE for RSS Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 10/70] net/ice/base: complete the health status codes Qi Zhang
` (61 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, stable, Dave Ertman
When creating the TLV to send to the FW for configuring DSCP
mode PFC, the PFCENABLE field was being masked with a 4-bit
mask (0xF), but this is an 8-bit bitmask of the classes enabled
for PFC. This means that traffic classes 4-7 could not be
enabled for PFC.
Remove the mask completely; it is not necessary, as we are
assigning 8 bits to an 8-bit field.
Fixes: 8ea78b169603 ("net/ice/base: support L3 DSCP QoS")
Cc: stable@dpdk.org
Signed-off-by: Dave Ertman <david.m.ertman@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_dcb.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ice/base/ice_dcb.c b/drivers/net/ice/base/ice_dcb.c
index cb6c5ba182..3d630757f8 100644
--- a/drivers/net/ice/base/ice_dcb.c
+++ b/drivers/net/ice/base/ice_dcb.c
@@ -1376,7 +1376,7 @@ ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
tlv->ouisubtype = HTONL(ouisubtype);
buf[0] = dcbcfg->pfc.pfccap & 0xF;
- buf[1] = dcbcfg->pfc.pfcena & 0xF;
+ buf[1] = dcbcfg->pfc.pfcena;
}
/**
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
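A small worked example of why the mask was wrong; the values are made up, only the 0xF mask and the 8-bit pfcena field come from the change above.

	/* Illustrative only: pfcena is an 8-bit bitmap, one bit per TC. */
	u8 pfcena   = 0x30;		/* PFC requested on TCs 4 and 5 */
	u8 old_buf1 = pfcena & 0xF;	/* previous code: 0x00, TCs 4-7 lost */
	u8 new_buf1 = pfcena;		/* fixed code: 0x30, all 8 TCs kept */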
* [PATCH v2 10/70] net/ice/base: complete the health status codes
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (8 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 09/70] net/ice/base: fix DSCP PFC TLV creation Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 11/70] net/ice/base: explicitly name E822 HW-dependent functions Qi Zhang
` (60 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Leszek Zygo
Add definitions for async health status codes.
Signed-off-by: Leszek Zygo <leszek.zygo@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_adminq_cmd.h | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index a3add411b8..517af4b6ef 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -2821,6 +2821,7 @@ struct ice_aqc_set_health_status_config {
#define ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT 0x106
#define ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED 0x107
#define ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT 0x108
+#define ICE_AQC_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE 0x109
#define ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG 0x10B
#define ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS 0x10C
#define ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE 0x10D
@@ -2842,7 +2843,16 @@ struct ice_aqc_set_health_status_config {
#define ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH 0x504
#define ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT 0x505
#define ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT 0x506
+#define ICE_AQC_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION 0x507
+#define ICE_AQC_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION 0x508
#define ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB 0x509
+#define ICE_AQC_HEALTH_STATUS_ERR_MNG_TIMEOUT 0x50A
+#define ICE_AQC_HEALTH_STATUS_ERR_BMC_RESET 0x50B
+#define ICE_AQC_HEALTH_STATUS_ERR_LAST_MNG_FAIL 0x50C
+#define ICE_AQC_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL 0x50D
+#define ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP 0x1000
+#define ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL 0x1001
+#define ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ 0x1002
/* Get Health Status codes (indirect 0xFF21) */
struct ice_aqc_get_supported_health_status_codes {
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
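A hedged sketch of how the new codes might be consumed; the wrapper function and the health_code parameter are assumptions made for illustration, while the status defines come from the hunk above and ice_warn() is used elsewhere in the base code.

	/* Hypothetical handler, for illustration only. */
	static void ice_example_handle_health(struct ice_hw *hw, u16 health_code)
	{
		switch (health_code) {
		case ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP:
		case ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL:
		case ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ:
			/* new 0x1000-range firmware codes added above */
			ice_warn(hw, "FW health status error 0x%x reported\n",
				 health_code);
			break;
		default:
			break;
		}
	}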
* [PATCH v2 11/70] net/ice/base: explicitly name E822 HW-dependent functions
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (9 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 10/70] net/ice/base: complete the health status codes Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 12/70] net/ice/base: move code block Qi Zhang
` (59 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Sergey Temerkhanov
Add the _e822 suffix to E822 HW-dependent function names.
Signed-off-by: Sergey Temerkhanov <sergey.temerkhanov@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_ptp_hw.c | 23 ++++++++++++-----------
drivers/net/ice/base/ice_ptp_hw.h | 7 ++++---
2 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index 76119364e4..23d90b127d 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -1268,7 +1268,7 @@ ice_ptp_prep_phy_adj_target_e822(struct ice_hw *hw, u32 target_time)
}
/**
- * ice_ptp_read_port_capture - Read a port's local time capture
+ * ice_ptp_read_port_capture_e822 - Read a port's local time capture
* @hw: pointer to HW struct
* @port: Port number to read
* @tx_ts: on return, the Tx port time capture
@@ -1279,7 +1279,8 @@ ice_ptp_prep_phy_adj_target_e822(struct ice_hw *hw, u32 target_time)
* Note this has no equivalent for the E810 devices.
*/
enum ice_status
-ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
+ice_ptp_read_port_capture_e822(struct ice_hw *hw, u8 port, u64 *tx_ts,
+ u64 *rx_ts)
{
enum ice_status status;
@@ -1309,7 +1310,7 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
}
/**
- * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command
+ * ice_ptp_one_port_cmd_e822 - Prepare a single PHY port for a timer command
* @hw: pointer to HW struct
* @port: Port to which cmd has to be sent
* @cmd: Command to be sent to the port
@@ -1321,8 +1322,8 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
* always handles all external PHYs internally.
*/
enum ice_status
-ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd,
- bool lock_sbq)
+ice_ptp_one_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd,
+ bool lock_sbq)
{
enum ice_status status;
u32 cmd_val, val;
@@ -1416,7 +1417,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd,
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
enum ice_status status;
- status = ice_ptp_one_port_cmd(hw, port, cmd, lock_sbq);
+ status = ice_ptp_one_port_cmd_e822(hw, port, cmd, lock_sbq);
if (status)
return status;
}
@@ -2318,7 +2319,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
ice_ptp_src_cmd(hw, READ_TIME);
/* Prepare the PHY timer for a READ_TIME capture command */
- status = ice_ptp_one_port_cmd(hw, port, READ_TIME, true);
+ status = ice_ptp_one_port_cmd_e822(hw, port, READ_TIME, true);
if (status)
return status;
@@ -2331,7 +2332,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
*phc_time = (u64)lo << 32 | zo;
/* Read the captured PHY time from the PHY shadow registers */
- status = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
+ status = ice_ptp_read_port_capture_e822(hw, port, &tx_time, &rx_time);
if (status)
return status;
@@ -2388,7 +2389,7 @@ static enum ice_status ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
if (status)
goto err_unlock;
- status = ice_ptp_one_port_cmd(hw, port, ADJ_TIME, true);
+ status = ice_ptp_one_port_cmd_e822(hw, port, ADJ_TIME, true);
if (status)
goto err_unlock;
@@ -2513,7 +2514,7 @@ ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass)
if (status)
return status;
- status = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL, true);
+ status = ice_ptp_one_port_cmd_e822(hw, port, INIT_INCVAL, true);
if (status)
return status;
@@ -2538,7 +2539,7 @@ ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass)
if (status)
return status;
- status = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL, true);
+ status = ice_ptp_one_port_cmd_e822(hw, port, INIT_INCVAL, true);
if (status)
return status;
diff --git a/drivers/net/ice/base/ice_ptp_hw.h b/drivers/net/ice/base/ice_ptp_hw.h
index d27815fd94..9cc3436aa8 100644
--- a/drivers/net/ice/base/ice_ptp_hw.h
+++ b/drivers/net/ice/base/ice_ptp_hw.h
@@ -157,10 +157,11 @@ ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time,
enum ice_status
ice_ptp_read_phy_incval_e822(struct ice_hw *hw, u8 port, u64 *incval);
enum ice_status
-ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts);
+ice_ptp_read_port_capture_e822(struct ice_hw *hw, u8 port,
+ u64 *tx_ts, u64 *rx_ts);
enum ice_status
-ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd,
- bool lock_sbq);
+ice_ptp_one_port_cmd_e822(struct ice_hw *hw, u8 port,
+ enum ice_ptp_tmr_cmd cmd, bool lock_sbq);
enum ice_status
ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
enum ice_clk_src clk_src);
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 12/70] net/ice/base: move code block
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (10 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 11/70] net/ice/base: explicitly name E822 HW-dependent functions Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 6:30 ` Yang, Qiming
2022-08-15 7:31 ` [PATCH v2 13/70] net/ice/base: add PHY 56G destination address Qi Zhang
` (58 subsequent siblings)
70 siblings, 1 reply; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Sergey Temerkhanov
Move some code blocks to the beginning of ice_ptp_hw.c to align
with the kernel driver.
Signed-off-by: Sergey Temerkhanov <sergey.temerkhanov@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_ptp_hw.c | 997 +++++++++++++++---------------
1 file changed, 498 insertions(+), 499 deletions(-)
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index 23d90b127d..22d0774dd7 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -101,6 +101,286 @@ u64 ice_ptp_read_src_incval(struct ice_hw *hw)
return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
}
+/**
+ * ice_read_cgu_reg_e822 - Read a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to read
+ * @val: storage for register value read
+ *
+ * Read the contents of a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ */
+static enum ice_status
+ice_read_cgu_reg_e822(struct ice_hw *hw, u16 addr, u32 *val)
+{
+ struct ice_sbq_msg_input cgu_msg;
+ enum ice_status status;
+
+ cgu_msg.opcode = ice_sbq_msg_rd;
+ cgu_msg.dest_dev = cgu;
+ cgu_msg.msg_addr_low = addr;
+ cgu_msg.msg_addr_high = 0x0;
+
+ status = ice_sbq_rw_reg_lp(hw, &cgu_msg, true);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, status %d\n",
+ addr, status);
+ return status;
+ }
+
+ *val = cgu_msg.data;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_write_cgu_reg_e822 - Write a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to write
+ * @val: value to write into the register
+ *
+ * Write the specified value to a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ */
+static enum ice_status
+ice_write_cgu_reg_e822(struct ice_hw *hw, u16 addr, u32 val)
+{
+ struct ice_sbq_msg_input cgu_msg;
+ enum ice_status status;
+
+ cgu_msg.opcode = ice_sbq_msg_wr;
+ cgu_msg.dest_dev = cgu;
+ cgu_msg.msg_addr_low = addr;
+ cgu_msg.msg_addr_high = 0x0;
+ cgu_msg.data = val;
+
+ status = ice_sbq_rw_reg_lp(hw, &cgu_msg, true);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, status %d\n",
+ addr, status);
+ return status;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_clk_freq_str - Convert time_ref_freq to string
+ * @clk_freq: Clock frequency
+ *
+ * Convert the specified TIME_REF clock frequency to a string.
+ */
+static const char *ice_clk_freq_str(u8 clk_freq)
+{
+ switch ((enum ice_time_ref_freq)clk_freq) {
+ case ICE_TIME_REF_FREQ_25_000:
+ return "25 MHz";
+ case ICE_TIME_REF_FREQ_122_880:
+ return "122.88 MHz";
+ case ICE_TIME_REF_FREQ_125_000:
+ return "125 MHz";
+ case ICE_TIME_REF_FREQ_153_600:
+ return "153.6 MHz";
+ case ICE_TIME_REF_FREQ_156_250:
+ return "156.25 MHz";
+ case ICE_TIME_REF_FREQ_245_760:
+ return "245.76 MHz";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_clk_src_str - Convert time_ref_src to string
+ * @clk_src: Clock source
+ *
+ * Convert the specified clock source to its string name.
+ */
+static const char *ice_clk_src_str(u8 clk_src)
+{
+ switch ((enum ice_clk_src)clk_src) {
+ case ICE_CLK_SRC_TCX0:
+ return "TCX0";
+ case ICE_CLK_SRC_TIME_REF:
+ return "TIME_REF";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
+ * @hw: pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCX0)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ */
+enum ice_status
+ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ union tspll_ro_bwm_lf bwm_lf;
+ union nac_cgu_dword19 dw19;
+ union nac_cgu_dword22 dw22;
+ union nac_cgu_dword24 dw24;
+ union nac_cgu_dword9 dw9;
+ enum ice_status status;
+
+ if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
+ ice_warn(hw, "Invalid TIME_REF frequency %u\n", clk_freq);
+ return ICE_ERR_PARAM;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ ice_warn(hw, "Invalid clock source %u\n", clk_src);
+ return ICE_ERR_PARAM;
+ }
+
+ if (clk_src == ICE_CLK_SRC_TCX0 &&
+ clk_freq != ICE_TIME_REF_FREQ_25_000) {
+ ice_warn(hw, "TCX0 only supports 25 MHz frequency\n");
+ return ICE_ERR_PARAM;
+ }
+
+ status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
+ if (status)
+ return status;
+
+ status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (status)
+ return status;
+
+ status = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (status)
+ return status;
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.field.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.field.time_ref_sel),
+ ice_clk_freq_str(dw9.field.time_ref_freq_sel),
+ bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (dw24.field.ts_pll_enable) {
+ dw24.field.ts_pll_enable = 0;
+
+ status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ if (status)
+ return status;
+ }
+
+ /* Set the frequency */
+ dw9.field.time_ref_freq_sel = clk_freq;
+ status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
+ if (status)
+ return status;
+
+ /* Configure the TS PLL feedback divisor */
+ status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
+ if (status)
+ return status;
+
+ dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
+ dw19.field.tspll_ndivratio = 1;
+
+ status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
+ if (status)
+ return status;
+
+ /* Configure the TS PLL post divisor */
+ status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
+ if (status)
+ return status;
+
+ dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
+ dw22.field.time1588clk_sel_div2 = 0;
+
+ status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
+ if (status)
+ return status;
+
+ /* Configure the TS PLL pre divisor and clock source */
+ status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (status)
+ return status;
+
+ dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
+ dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
+ dw24.field.time_ref_sel = clk_src;
+
+ status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ if (status)
+ return status;
+
+ /* Finally, enable the PLL */
+ dw24.field.ts_pll_enable = 1;
+
+ status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ if (status)
+ return status;
+
+ /* Wait to verify if the PLL locks */
+ ice_msec_delay(1, true);
+
+ status = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (status)
+ return status;
+
+ if (!bwm_lf.field.plllock_true_lock_cri) {
+ ice_warn(hw, "CGU PLL failed to lock\n");
+ return ICE_ERR_NOT_READY;
+ }
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.field.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.field.time_ref_sel),
+ ice_clk_freq_str(dw9.field.time_ref_freq_sel),
+ bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_init_cgu_e822 - Initialize CGU with settings from firmware
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the Clock Generation Unit of the E822 device.
+ */
+static enum ice_status ice_init_cgu_e822(struct ice_hw *hw)
+{
+ struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
+ union tspll_cntr_bist_settings cntr_bist;
+ enum ice_status status;
+
+ status = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ &cntr_bist.val);
+ if (status)
+ return status;
+
+ /* Disable sticky lock detection so lock status reported is accurate */
+ cntr_bist.field.i_plllock_sel_0 = 0;
+ cntr_bist.field.i_plllock_sel_1 = 0;
+
+ status = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ cntr_bist.val);
+ if (status)
+ return status;
+
+ /* Configure the CGU PLL using the parameters from the function
+ * capabilities.
+ */
+ status = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
+ (enum ice_clk_src)ts_info->clk_src);
+ if (status)
+ return status;
+
+ return ICE_SUCCESS;
+}
+
/**
* ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
* @hw: pointer to HW struct
@@ -346,261 +626,59 @@ ice_read_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
* The high offset is looked up. This function only operates on registers
* known to be two parts of a 64bit value.
*/
-static enum ice_status
-ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
-{
- enum ice_status status;
- u32 low, high;
- u16 high_addr;
-
- /* Only operate on registers known to be split into two 32bit
- * registers.
- */
- if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
- ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
- low_addr);
- return ICE_ERR_PARAM;
- }
-
- status = ice_read_phy_reg_e822(hw, port, low_addr, &low);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, status %d",
- low_addr, status);
- return status;
- }
-
- status = ice_read_phy_reg_e822(hw, port, high_addr, &high);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, status %d",
- high_addr, status);
- return status;
- }
-
- *val = (u64)high << 32 | low;
-
- return ICE_SUCCESS;
-}
-
-/**
- * ice_write_phy_reg_e822_lp - Write a PHY register
- * @hw: pointer to the HW struct
- * @port: PHY port to write to
- * @offset: PHY register offset to write
- * @val: The value to write to the register
- * @lock_sbq: true if the sideband queue lock must be acquired
- *
- * Write a PHY register for the given port over the device sideband queue.
- */
-static enum ice_status
-ice_write_phy_reg_e822_lp(struct ice_hw *hw, u8 port, u16 offset, u32 val,
- bool lock_sbq)
-{
- struct ice_sbq_msg_input msg = {0};
- enum ice_status status;
-
- ice_fill_phy_msg_e822(&msg, port, offset);
- msg.opcode = ice_sbq_msg_wr;
- msg.data = val;
-
- status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to send message to phy, status %d\n",
- status);
- return status;
- }
-
- return ICE_SUCCESS;
-}
-
-enum ice_status
-ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
-{
- return ice_write_phy_reg_e822_lp(hw, port, offset, val, true);
-}
-
-/**
- * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
- * @hw: pointer to the HW struct
- * @port: port to write to
- * @low_addr: offset of the low register
- * @val: 40b value to write
- *
- * Write the provided 40b value to the two associated registers by splitting
- * it up into two chunks, the lower 8 bits and the upper 32 bits.
- */
-static enum ice_status
-ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
-{
- enum ice_status status;
- u32 low, high;
- u16 high_addr;
-
- /* Only operate on registers known to be split into a lower 8 bit
- * register and an upper 32 bit register.
- */
- if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
- ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
- low_addr);
- return ICE_ERR_PARAM;
- }
-
- low = (u32)(val & P_REG_40B_LOW_M);
- high = (u32)(val >> P_REG_40B_HIGH_S);
-
- status = ice_write_phy_reg_e822(hw, port, low_addr, low);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, status %d",
- low_addr, status);
- return status;
- }
-
- status = ice_write_phy_reg_e822(hw, port, high_addr, high);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, status %d",
- high_addr, status);
- return status;
- }
-
- return ICE_SUCCESS;
-}
-
-/**
- * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
- * @hw: pointer to the HW struct
- * @port: PHY port to read from
- * @low_addr: offset of the lower register to read from
- * @val: the contents of the 64bit value to write to PHY
- *
- * Write the 64bit value to the two associated 32bit PHY registers. The offset
- * is always specified as the lower register, and the high address is looked
- * up. This function only operates on registers known to be two parts of
- * a 64bit value.
- */
-static enum ice_status
-ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
-{
- enum ice_status status;
- u32 low, high;
- u16 high_addr;
-
- /* Only operate on registers known to be split into two 32bit
- * registers.
- */
- if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
- ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
- low_addr);
- return ICE_ERR_PARAM;
- }
-
- low = ICE_LO_DWORD(val);
- high = ICE_HI_DWORD(val);
-
- status = ice_write_phy_reg_e822(hw, port, low_addr, low);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, status %d",
- low_addr, status);
- return status;
- }
-
- status = ice_write_phy_reg_e822(hw, port, high_addr, high);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, status %d",
- high_addr, status);
- return status;
- }
-
- return ICE_SUCCESS;
-}
-
-/**
- * ice_fill_quad_msg_e822 - Fill message data for quad register access
- * @msg: the PHY message buffer to fill in
- * @quad: the quad to access
- * @offset: the register offset
- *
- * Fill a message buffer for accessing a register in a quad shared between
- * multiple PHYs.
- */
-static void
-ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
-{
- u32 addr;
-
- msg->dest_dev = rmn_0;
-
- if ((quad % ICE_NUM_QUAD_TYPE) == 0)
- addr = Q_0_BASE + offset;
- else
- addr = Q_1_BASE + offset;
-
- msg->msg_addr_low = ICE_LO_WORD(addr);
- msg->msg_addr_high = ICE_HI_WORD(addr);
-}
-
-/**
- * ice_read_quad_reg_e822_lp - Read a PHY quad register
- * @hw: pointer to the HW struct
- * @quad: quad to read from
- * @offset: quad register offset to read
- * @val: on return, the contents read from the quad
- * @lock_sbq: true if the sideband queue lock must be acquired
- *
- * Read a quad register over the device sideband queue. Quad registers are
- * shared between multiple PHYs.
- */
-static enum ice_status
-ice_read_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32 *val,
- bool lock_sbq)
+static enum ice_status
+ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
{
- struct ice_sbq_msg_input msg = {0};
enum ice_status status;
+ u32 low, high;
+ u16 high_addr;
- if (quad >= ICE_MAX_QUAD)
+ /* Only operate on registers known to be split into two 32bit
+ * registers.
+ */
+ if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
+ low_addr);
return ICE_ERR_PARAM;
+ }
- ice_fill_quad_msg_e822(&msg, quad, offset);
- msg.opcode = ice_sbq_msg_rd;
+ status = ice_read_phy_reg_e822(hw, port, low_addr, &low);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x\n, status %d",
+ low_addr, status);
+ return status;
+ }
- status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
+ status = ice_read_phy_reg_e822(hw, port, high_addr, &high);
if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to send message to phy, status %d\n",
- status);
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x\n, status %d",
+ high_addr, status);
return status;
}
- *val = msg.data;
+ *val = (u64)high << 32 | low;
return ICE_SUCCESS;
}
-enum ice_status
-ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
-{
- return ice_read_quad_reg_e822_lp(hw, quad, offset, val, true);
-}
-
/**
- * ice_write_quad_reg_e822_lp - Write a PHY quad register
+ * ice_write_phy_reg_e822_lp - Write a PHY register
* @hw: pointer to the HW struct
- * @quad: quad to write to
- * @offset: quad register offset to write
+ * @port: PHY port to write to
+ * @offset: PHY register offset to write
* @val: The value to write to the register
* @lock_sbq: true if the sideband queue lock must be acquired
*
- * Write a quad register over the device sideband queue. Quad registers are
- * shared between multiple PHYs.
+ * Write a PHY register for the given port over the device sideband queue.
*/
static enum ice_status
-ice_write_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32 val,
- bool lock_sbq)
+ice_write_phy_reg_e822_lp(struct ice_hw *hw, u8 port, u16 offset, u32 val,
+ bool lock_sbq)
{
struct ice_sbq_msg_input msg = {0};
enum ice_status status;
- if (quad >= ICE_MAX_QUAD)
- return ICE_ERR_PARAM;
-
- ice_fill_quad_msg_e822(&msg, quad, offset);
+ ice_fill_phy_msg_e822(&msg, port, offset);
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
@@ -615,84 +693,51 @@ ice_write_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32 val,
}
enum ice_status
-ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
+ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
{
- return ice_write_quad_reg_e822_lp(hw, quad, offset, val, true);
+ return ice_write_phy_reg_e822_lp(hw, port, offset, val, true);
}
/**
- * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
+ * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
* @hw: pointer to the HW struct
- * @quad: the quad to read from
- * @idx: the timestamp index to read
- * @tstamp: on return, the 40bit timestamp value
+ * @port: port to write to
+ * @low_addr: offset of the low register
+ * @val: 40b value to write
*
- * Read a 40bit timestamp value out of the two associated registers in the
- * quad memory block that is shared between the internal PHYs of the E822
- * family of devices.
+ * Write the provided 40b value to the two associated registers by splitting
+ * it up into two chunks, the lower 8 bits and the upper 32 bits.
*/
static enum ice_status
-ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
+ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
{
enum ice_status status;
- u16 lo_addr, hi_addr;
- u32 lo, hi;
-
- lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
- hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
-
- status = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n",
- status);
- return status;
- }
-
- status = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n",
- status);
- return status;
- }
+ u32 low, high;
+ u16 high_addr;
- /* For E822 based internal PHYs, the timestamp is reported with the
- * lower 8 bits in the low register, and the upper 32 bits in the high
- * register.
+ /* Only operate on registers known to be split into a lower 8 bit
+ * register and an upper 32 bit register.
*/
- *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
-
- return ICE_SUCCESS;
-}
-
-/**
- * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
- * @hw: pointer to the HW struct
- * @quad: the quad to read from
- * @idx: the timestamp index to reset
- *
- * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
- * shared between the internal PHYs on the E822 devices.
- */
-static enum ice_status
-ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
-{
- enum ice_status status;
- u16 lo_addr, hi_addr;
+ if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
+ ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
+ low_addr);
+ return ICE_ERR_PARAM;
+ }
- lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
- hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
+ low = (u32)(val & P_REG_40B_LOW_M);
+ high = (u32)(val >> P_REG_40B_HIGH_S);
- status = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
+ status = ice_write_phy_reg_e822(hw, port, low_addr, low);
if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, status %d\n",
- status);
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, status %d",
+ low_addr, status);
return status;
}
- status = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
+ status = ice_write_phy_reg_e822(hw, port, high_addr, high);
if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, status %d\n",
- status);
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, status %d",
+ high_addr, status);
return status;
}
@@ -700,282 +745,236 @@ ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
}
/**
- * ice_read_cgu_reg_e822 - Read a CGU register
+ * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
* @hw: pointer to the HW struct
- * @addr: Register address to read
- * @val: storage for register value read
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: the contents of the 64bit value to write to PHY
*
- * Read the contents of a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
+ * Write the 64bit value to the two associated 32bit PHY registers. The offset
+ * is always specified as the lower register, and the high address is looked
+ * up. This function only operates on registers known to be two parts of
+ * a 64bit value.
*/
static enum ice_status
-ice_read_cgu_reg_e822(struct ice_hw *hw, u16 addr, u32 *val)
+ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
{
- struct ice_sbq_msg_input cgu_msg;
enum ice_status status;
+ u32 low, high;
+ u16 high_addr;
- cgu_msg.opcode = ice_sbq_msg_rd;
- cgu_msg.dest_dev = cgu;
- cgu_msg.msg_addr_low = addr;
- cgu_msg.msg_addr_high = 0x0;
+ /* Only operate on registers known to be split into two 32bit
+ * registers.
+ */
+ if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
+ low_addr);
+ return ICE_ERR_PARAM;
+ }
- status = ice_sbq_rw_reg_lp(hw, &cgu_msg, true);
+ low = ICE_LO_DWORD(val);
+ high = ICE_HI_DWORD(val);
+
+ status = ice_write_phy_reg_e822(hw, port, low_addr, low);
if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, status %d\n",
- addr, status);
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, status %d",
+ low_addr, status);
return status;
}
- *val = cgu_msg.data;
-
- return status;
-}
-
-/**
- * ice_write_cgu_reg_e822 - Write a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to write
- * @val: value to write into the register
- *
- * Write the specified value to a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- */
-static enum ice_status
-ice_write_cgu_reg_e822(struct ice_hw *hw, u16 addr, u32 val)
-{
- struct ice_sbq_msg_input cgu_msg;
- enum ice_status status;
-
- cgu_msg.opcode = ice_sbq_msg_wr;
- cgu_msg.dest_dev = cgu;
- cgu_msg.msg_addr_low = addr;
- cgu_msg.msg_addr_high = 0x0;
- cgu_msg.data = val;
-
- status = ice_sbq_rw_reg_lp(hw, &cgu_msg, true);
+ status = ice_write_phy_reg_e822(hw, port, high_addr, high);
if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, status %d\n",
- addr, status);
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, status %d",
+ high_addr, status);
return status;
}
- return status;
-}
-
-/**
- * ice_clk_freq_str - Convert time_ref_freq to string
- * @clk_freq: Clock frequency
- *
- * Convert the specified TIME_REF clock frequency to a string.
- */
-static const char *ice_clk_freq_str(u8 clk_freq)
-{
- switch ((enum ice_time_ref_freq)clk_freq) {
- case ICE_TIME_REF_FREQ_25_000:
- return "25 MHz";
- case ICE_TIME_REF_FREQ_122_880:
- return "122.88 MHz";
- case ICE_TIME_REF_FREQ_125_000:
- return "125 MHz";
- case ICE_TIME_REF_FREQ_153_600:
- return "153.6 MHz";
- case ICE_TIME_REF_FREQ_156_250:
- return "156.25 MHz";
- case ICE_TIME_REF_FREQ_245_760:
- return "245.76 MHz";
- default:
- return "Unknown";
- }
+ return ICE_SUCCESS;
}
/**
- * ice_clk_src_str - Convert time_ref_src to string
- * @clk_src: Clock source
+ * ice_fill_quad_msg_e822 - Fill message data for quad register access
+ * @msg: the PHY message buffer to fill in
+ * @quad: the quad to access
+ * @offset: the register offset
*
- * Convert the specified clock source to its string name.
- */
-static const char *ice_clk_src_str(u8 clk_src)
-{
- switch ((enum ice_clk_src)clk_src) {
- case ICE_CLK_SRC_TCX0:
- return "TCX0";
- case ICE_CLK_SRC_TIME_REF:
- return "TIME_REF";
- default:
- return "Unknown";
- }
+ * Fill a message buffer for accessing a register in a quad shared between
+ * multiple PHYs.
+ */
+static void
+ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
+{
+ u32 addr;
+
+ msg->dest_dev = rmn_0;
+
+ if ((quad % ICE_NUM_QUAD_TYPE) == 0)
+ addr = Q_0_BASE + offset;
+ else
+ addr = Q_1_BASE + offset;
+
+ msg->msg_addr_low = ICE_LO_WORD(addr);
+ msg->msg_addr_high = ICE_HI_WORD(addr);
}
/**
- * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
+ * ice_read_quad_reg_e822_lp - Read a PHY quad register
* @hw: pointer to the HW struct
- * @clk_freq: Clock frequency to program
- * @clk_src: Clock source to select (TIME_REF, or TCX0)
+ * @quad: quad to read from
+ * @offset: quad register offset to read
+ * @val: on return, the contents read from the quad
+ * @lock_sbq: true if the sideband queue lock must be acquired
*
- * Configure the Clock Generation Unit with the desired clock frequency and
- * time reference, enabling the PLL which drives the PTP hardware clock.
+ * Read a quad register over the device sideband queue. Quad registers are
+ * shared between multiple PHYs.
*/
-enum ice_status
-ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
- enum ice_clk_src clk_src)
+static enum ice_status
+ice_read_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32 *val,
+ bool lock_sbq)
{
- union tspll_ro_bwm_lf bwm_lf;
- union nac_cgu_dword19 dw19;
- union nac_cgu_dword22 dw22;
- union nac_cgu_dword24 dw24;
- union nac_cgu_dword9 dw9;
+ struct ice_sbq_msg_input msg = {0};
enum ice_status status;
- if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
- ice_warn(hw, "Invalid TIME_REF frequency %u\n", clk_freq);
- return ICE_ERR_PARAM;
- }
-
- if (clk_src >= NUM_ICE_CLK_SRC) {
- ice_warn(hw, "Invalid clock source %u\n", clk_src);
- return ICE_ERR_PARAM;
- }
-
- if (clk_src == ICE_CLK_SRC_TCX0 &&
- clk_freq != ICE_TIME_REF_FREQ_25_000) {
- ice_warn(hw, "TCX0 only supports 25 MHz frequency\n");
+ if (quad >= ICE_MAX_QUAD)
return ICE_ERR_PARAM;
- }
-
- status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
- if (status)
- return status;
- status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
- if (status)
- return status;
+ ice_fill_quad_msg_e822(&msg, quad, offset);
+ msg.opcode = ice_sbq_msg_rd;
- status = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (status)
+ status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to phy, status %d\n",
+ status);
return status;
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.field.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw24.field.time_ref_sel),
- ice_clk_freq_str(dw9.field.time_ref_freq_sel),
- bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
-
- /* Disable the PLL before changing the clock source or frequency */
- if (dw24.field.ts_pll_enable) {
- dw24.field.ts_pll_enable = 0;
-
- status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
- if (status)
- return status;
}
- /* Set the frequency */
- dw9.field.time_ref_freq_sel = clk_freq;
- status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
- if (status)
- return status;
+ *val = msg.data;
- /* Configure the TS PLL feedback divisor */
- status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
- if (status)
- return status;
+ return ICE_SUCCESS;
+}
- dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
- dw19.field.tspll_ndivratio = 1;
+enum ice_status
+ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
+{
+ return ice_read_quad_reg_e822_lp(hw, quad, offset, val, true);
+}
- status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
- if (status)
- return status;
+/**
+ * ice_write_quad_reg_e822_lp - Write a PHY quad register
+ * @hw: pointer to the HW struct
+ * @quad: quad to write to
+ * @offset: quad register offset to write
+ * @val: The value to write to the register
+ * @lock_sbq: true if the sideband queue lock must be acquired
+ *
+ * Write a quad register over the device sideband queue. Quad registers are
+ * shared between multiple PHYs.
+ */
+static enum ice_status
+ice_write_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32 val,
+ bool lock_sbq)
+{
+ struct ice_sbq_msg_input msg = {0};
+ enum ice_status status;
- /* Configure the TS PLL post divisor */
- status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
- if (status)
- return status;
+ if (quad >= ICE_MAX_QUAD)
+ return ICE_ERR_PARAM;
- dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
- dw22.field.time1588clk_sel_div2 = 0;
+ ice_fill_quad_msg_e822(&msg, quad, offset);
+ msg.opcode = ice_sbq_msg_wr;
+ msg.data = val;
- status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
- if (status)
+ status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to phy, status %d\n",
+ status);
return status;
+ }
- /* Configure the TS PLL pre divisor and clock source */
- status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
- if (status)
- return status;
+ return ICE_SUCCESS;
+}
- dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
- dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
- dw24.field.time_ref_sel = clk_src;
+enum ice_status
+ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
+{
+ return ice_write_quad_reg_e822_lp(hw, quad, offset, val, true);
+}
- status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
- if (status)
- return status;
+/**
+ * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
+ * @hw: pointer to the HW struct
+ * @quad: the quad to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the two associated registers in the
+ * quad memory block that is shared between the internal PHYs of the E822
+ * family of devices.
+ */
+static enum ice_status
+ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
+{
+ enum ice_status status;
+ u16 lo_addr, hi_addr;
+ u32 lo, hi;
- /* Finally, enable the PLL */
- dw24.field.ts_pll_enable = 1;
+ lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
+ hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
- status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
- if (status)
+ status = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n",
+ status);
return status;
+ }
- /* Wait to verify if the PLL locks */
- ice_msec_delay(1, true);
-
- status = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (status)
+ status = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n",
+ status);
return status;
-
- if (!bwm_lf.field.plllock_true_lock_cri) {
- ice_warn(hw, "CGU PLL failed to lock\n");
- return ICE_ERR_NOT_READY;
}
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.field.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw24.field.time_ref_sel),
- ice_clk_freq_str(dw9.field.time_ref_freq_sel),
- bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
-
+ /* For E822 based internal PHYs, the timestamp is reported with the
+ * lower 8 bits in the low register, and the upper 32 bits in the high
+ * register.
+ */
+ *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
return ICE_SUCCESS;
}
/**
- * ice_init_cgu_e822 - Initialize CGU with settings from firmware
- * @hw: pointer to the HW structure
+ * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
+ * @hw: pointer to the HW struct
+ * @quad: the quad to read from
+ * @idx: the timestamp index to reset
*
- * Initialize the Clock Generation Unit of the E822 device.
+ * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
+ * shared between the internal PHYs on the E822 devices.
*/
-static enum ice_status ice_init_cgu_e822(struct ice_hw *hw)
+static enum ice_status
+ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
{
- struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
- union tspll_cntr_bist_settings cntr_bist;
enum ice_status status;
+ u16 lo_addr, hi_addr;
- status = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
- &cntr_bist.val);
- if (status)
- return status;
-
- /* Disable sticky lock detection so lock status reported is accurate */
- cntr_bist.field.i_plllock_sel_0 = 0;
- cntr_bist.field.i_plllock_sel_1 = 0;
+ lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
+ hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
- status = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
- cntr_bist.val);
- if (status)
+ status = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, status %d\n",
+ status);
return status;
+ }
- /* Configure the CGU PLL using the parameters from the function
- * capabilities.
- */
- status = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
- (enum ice_clk_src)ts_info->clk_src);
- if (status)
+ status = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, status %d\n",
+ status);
return status;
+ }
return ICE_SUCCESS;
}
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* RE: [PATCH v2 12/70] net/ice/base: move code block
2022-08-15 7:31 ` [PATCH v2 12/70] net/ice/base: move code block Qi Zhang
@ 2022-08-15 6:30 ` Yang, Qiming
0 siblings, 0 replies; 149+ messages in thread
From: Yang, Qiming @ 2022-08-15 6:30 UTC (permalink / raw)
To: Zhang, Qi Z; +Cc: dev, Temerkhanov, Sergey
> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: Monday, August 15, 2022 3:31 PM
> To: Yang, Qiming <qiming.yang@intel.com>
> Cc: dev@dpdk.org; Zhang, Qi Z <qi.z.zhang@intel.com>; Temerkhanov,
> Sergey <sergey.temerkhanov@intel.com>
> Subject: [PATCH v2 12/70] net/ice/base: move code block
>
> Move some code blocks to the beginning of ice_ptp_hw.c to align with the
> kernel driver.
>
> Signed-off-by: Sergey Temerkhanov <sergey.temerkhanov@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> ---
> drivers/net/ice/base/ice_ptp_hw.c | 997 +++++++++++++++---------------
> 1 file changed, 498 insertions(+), 499 deletions(-)
>
> diff --git a/drivers/net/ice/base/ice_ptp_hw.c
> b/drivers/net/ice/base/ice_ptp_hw.c
> index 23d90b127d..22d0774dd7 100644
> --- a/drivers/net/ice/base/ice_ptp_hw.c
> +++ b/drivers/net/ice/base/ice_ptp_hw.c
> @@ -101,6 +101,286 @@ u64 ice_ptp_read_src_incval(struct ice_hw *hw)
> return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo; }
>
> +/**
> + * ice_read_cgu_reg_e822 - Read a CGU register
> + * @hw: pointer to the HW struct
> + * @addr: Register address to read
> + * @val: storage for register value read
> + *
> + * Read the contents of a register of the Clock Generation Unit. Only
> + * applicable to E822 devices.
> + */
> +static enum ice_status
> +ice_read_cgu_reg_e822(struct ice_hw *hw, u16 addr, u32 *val) {
> + struct ice_sbq_msg_input cgu_msg;
> + enum ice_status status;
> +
> + cgu_msg.opcode = ice_sbq_msg_rd;
> + cgu_msg.dest_dev = cgu;
> + cgu_msg.msg_addr_low = addr;
> + cgu_msg.msg_addr_high = 0x0;
> +
> + status = ice_sbq_rw_reg_lp(hw, &cgu_msg, true);
> + if (status) {
> + ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register
> 0x%04x, status %d\n",
> + addr, status);
> + return status;
> + }
> +
> + *val = cgu_msg.data;
> +
> + return ICE_SUCCESS;
> +}
> +
> +/**
> + * ice_write_cgu_reg_e822 - Write a CGU register
> + * @hw: pointer to the HW struct
> + * @addr: Register address to write
> + * @val: value to write into the register
> + *
> + * Write the specified value to a register of the Clock Generation
> +Unit. Only
> + * applicable to E822 devices.
> + */
> +static enum ice_status
> +ice_write_cgu_reg_e822(struct ice_hw *hw, u16 addr, u32 val) {
> + struct ice_sbq_msg_input cgu_msg;
> + enum ice_status status;
> +
> + cgu_msg.opcode = ice_sbq_msg_wr;
> + cgu_msg.dest_dev = cgu;
> + cgu_msg.msg_addr_low = addr;
> + cgu_msg.msg_addr_high = 0x0;
> + cgu_msg.data = val;
> +
> + status = ice_sbq_rw_reg_lp(hw, &cgu_msg, true);
> + if (status) {
> + ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register
> 0x%04x, status %d\n",
> + addr, status);
> + return status;
> + }
> +
> + return ICE_SUCCESS;
> +}
> +
> +/**
> + * ice_clk_freq_str - Convert time_ref_freq to string
> + * @clk_freq: Clock frequency
> + *
> + * Convert the specified TIME_REF clock frequency to a string.
> + */
> +static const char *ice_clk_freq_str(u8 clk_freq) {
> + switch ((enum ice_time_ref_freq)clk_freq) {
> + case ICE_TIME_REF_FREQ_25_000:
> + return "25 MHz";
> + case ICE_TIME_REF_FREQ_122_880:
> + return "122.88 MHz";
> + case ICE_TIME_REF_FREQ_125_000:
> + return "125 MHz";
> + case ICE_TIME_REF_FREQ_153_600:
> + return "153.6 MHz";
> + case ICE_TIME_REF_FREQ_156_250:
> + return "156.25 MHz";
> + case ICE_TIME_REF_FREQ_245_760:
> + return "245.76 MHz";
> + default:
> + return "Unknown";
> + }
> +}
> +
> +/**
> + * ice_clk_src_str - Convert time_ref_src to string
> + * @clk_src: Clock source
> + *
> + * Convert the specified clock source to its string name.
> + */
> +static const char *ice_clk_src_str(u8 clk_src) {
> + switch ((enum ice_clk_src)clk_src) {
> + case ICE_CLK_SRC_TCX0:
> + return "TCX0";
> + case ICE_CLK_SRC_TIME_REF:
> + return "TIME_REF";
> + default:
> + return "Unknown";
> + }
> +}
> +
> +/**
> + * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
> + * @hw: pointer to the HW struct
> + * @clk_freq: Clock frequency to program
> + * @clk_src: Clock source to select (TIME_REF, or TCX0)
> + *
> + * Configure the Clock Generation Unit with the desired clock frequency
> +and
> + * time reference, enabling the PLL which drives the PTP hardware clock.
> + */
> +enum ice_status
> +ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
> + enum ice_clk_src clk_src)
> +{
> + union tspll_ro_bwm_lf bwm_lf;
> + union nac_cgu_dword19 dw19;
> + union nac_cgu_dword22 dw22;
> + union nac_cgu_dword24 dw24;
> + union nac_cgu_dword9 dw9;
> + enum ice_status status;
> +
> + if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
> + ice_warn(hw, "Invalid TIME_REF frequency %u\n", clk_freq);
> + return ICE_ERR_PARAM;
> + }
> +
> + if (clk_src >= NUM_ICE_CLK_SRC) {
> + ice_warn(hw, "Invalid clock source %u\n", clk_src);
> + return ICE_ERR_PARAM;
> + }
> +
> + if (clk_src == ICE_CLK_SRC_TCX0 &&
> + clk_freq != ICE_TIME_REF_FREQ_25_000) {
> + ice_warn(hw, "TCX0 only supports 25 MHz frequency\n");
> + return ICE_ERR_PARAM;
> + }
> +
> + status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
> + if (status)
> + return status;
> +
> + status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24,
> &dw24.val);
> + if (status)
> + return status;
> +
> + status = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF,
> &bwm_lf.val);
> + if (status)
> + return status;
> +
> + /* Log the current clock configuration */
> + ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s,
> clk_src %s, clk_freq %s, PLL %s\n",
> + dw24.field.ts_pll_enable ? "enabled" : "disabled",
> + ice_clk_src_str(dw24.field.time_ref_sel),
> + ice_clk_freq_str(dw9.field.time_ref_freq_sel),
> + bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
> +
> + /* Disable the PLL before changing the clock source or frequency */
> + if (dw24.field.ts_pll_enable) {
> + dw24.field.ts_pll_enable = 0;
> +
> + status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24,
> dw24.val);
> + if (status)
> + return status;
> + }
> +
> + /* Set the frequency */
> + dw9.field.time_ref_freq_sel = clk_freq;
> + status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
> + if (status)
> + return status;
> +
> + /* Configure the TS PLL feedback divisor */
> + status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19,
> &dw19.val);
> + if (status)
> + return status;
> +
> + dw19.field.tspll_fbdiv_intgr =
> e822_cgu_params[clk_freq].feedback_div;
> + dw19.field.tspll_ndivratio = 1;
> +
> + status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19,
> dw19.val);
> + if (status)
> + return status;
> +
> + /* Configure the TS PLL post divisor */
> + status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22,
> &dw22.val);
> + if (status)
> + return status;
> +
> + dw22.field.time1588clk_div =
> e822_cgu_params[clk_freq].post_pll_div;
> + dw22.field.time1588clk_sel_div2 = 0;
> +
> + status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22,
> dw22.val);
> + if (status)
> + return status;
> +
> + /* Configure the TS PLL pre divisor and clock source */
> + status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
> + if (status)
> + return status;
> +
> + dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
> + dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
> + dw24.field.time_ref_sel = clk_src;
> +
> + status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
> + if (status)
> + return status;
> +
> + /* Finally, enable the PLL */
> + dw24.field.ts_pll_enable = 1;
> +
> + status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
> + if (status)
> + return status;
> +
> + /* Wait to verify if the PLL locks */
> + ice_msec_delay(1, true);
> +
> + status = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
> + if (status)
> + return status;
> +
> + if (!bwm_lf.field.plllock_true_lock_cri) {
> + ice_warn(hw, "CGU PLL failed to lock\n");
> + return ICE_ERR_NOT_READY;
> + }
> +
> + /* Log the current clock configuration */
> + ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
> + dw24.field.ts_pll_enable ? "enabled" : "disabled",
> + ice_clk_src_str(dw24.field.time_ref_sel),
> + ice_clk_freq_str(dw9.field.time_ref_freq_sel),
> + bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
> +
> + return ICE_SUCCESS;
> +}
> +
> +/**
> + * ice_init_cgu_e822 - Initialize CGU with settings from firmware
> + * @hw: pointer to the HW structure
> + *
> + * Initialize the Clock Generation Unit of the E822 device.
> + */
> +static enum ice_status ice_init_cgu_e822(struct ice_hw *hw)
> +{
> + struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
> + union tspll_cntr_bist_settings cntr_bist;
> + enum ice_status status;
> +
> + status = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
> + &cntr_bist.val);
> + if (status)
> + return status;
> +
> + /* Disable sticky lock detection so lock status reported is accurate */
> + cntr_bist.field.i_plllock_sel_0 = 0;
> + cntr_bist.field.i_plllock_sel_1 = 0;
> +
> + status = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
> + cntr_bist.val);
> + if (status)
> + return status;
> +
> + /* Configure the CGU PLL using the parameters from the function
> + * capabilities.
> + */
> + status = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
> + (enum ice_clk_src)ts_info->clk_src);
> + if (status)
> + return status;
> +
> + return ICE_SUCCESS;
> +}
> +
> /**
> * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
> * @hw: pointer to HW struct
> @@ -346,261 +626,59 @@ ice_read_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
> * The high offset is looked up. This function only operates on registers
> * known to be two parts of a 64bit value.
> */
> -static enum ice_status
> -ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
> -{
> - enum ice_status status;
> - u32 low, high;
> - u16 high_addr;
> -
> - /* Only operate on registers known to be split into two 32bit
> - * registers.
> - */
> - if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
> - ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr
> 0x%08x\n",
> - low_addr);
> - return ICE_ERR_PARAM;
> - }
> -
> - status = ice_read_phy_reg_e822(hw, port, low_addr, &low);
> - if (status) {
> - ice_debug(hw, ICE_DBG_PTP, "Failed to read from low
> register 0x%08x\n, status %d",
> - low_addr, status);
> - return status;
> - }
> -
> - status = ice_read_phy_reg_e822(hw, port, high_addr, &high);
> - if (status) {
> - ice_debug(hw, ICE_DBG_PTP, "Failed to read from high
> register 0x%08x\n, status %d",
> - high_addr, status);
> - return status;
> - }
> -
> - *val = (u64)high << 32 | low;
> -
> - return ICE_SUCCESS;
> -}
> -
> -/**
> - * ice_write_phy_reg_e822_lp - Write a PHY register
> - * @hw: pointer to the HW struct
> - * @port: PHY port to write to
> - * @offset: PHY register offset to write
> - * @val: The value to write to the register
> - * @lock_sbq: true if the sideband queue lock must be acquired
> - *
> - * Write a PHY register for the given port over the device sideband queue.
> - */
> -static enum ice_status
> -ice_write_phy_reg_e822_lp(struct ice_hw *hw, u8 port, u16 offset, u32 val,
> - bool lock_sbq)
> -{
> - struct ice_sbq_msg_input msg = {0};
> - enum ice_status status;
> -
> - ice_fill_phy_msg_e822(&msg, port, offset);
> - msg.opcode = ice_sbq_msg_wr;
> - msg.data = val;
> -
> - status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
> - if (status) {
> - ice_debug(hw, ICE_DBG_PTP, "Failed to send message to
> phy, status %d\n",
> - status);
> - return status;
> - }
> -
> - return ICE_SUCCESS;
> -}
> -
> -enum ice_status
> -ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
> -{
> - return ice_write_phy_reg_e822_lp(hw, port, offset, val, true);
> -}
> -
> -/**
> - * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
> - * @hw: pointer to the HW struct
> - * @port: port to write to
> - * @low_addr: offset of the low register
> - * @val: 40b value to write
> - *
> - * Write the provided 40b value to the two associated registers by splitting
> - * it up into two chunks, the lower 8 bits and the upper 32 bits.
> - */
> -static enum ice_status
> -ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
> -{
> - enum ice_status status;
> - u32 low, high;
> - u16 high_addr;
> -
> - /* Only operate on registers known to be split into a lower 8 bit
> - * register and an upper 32 bit register.
> - */
> - if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
> - ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr
> 0x%08x\n",
> - low_addr);
> - return ICE_ERR_PARAM;
> - }
> -
> - low = (u32)(val & P_REG_40B_LOW_M);
> - high = (u32)(val >> P_REG_40B_HIGH_S);
> -
> - status = ice_write_phy_reg_e822(hw, port, low_addr, low);
> - if (status) {
> - ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register
> 0x%08x\n, status %d",
> - low_addr, status);
> - return status;
> - }
> -
> - status = ice_write_phy_reg_e822(hw, port, high_addr, high);
> - if (status) {
> - ice_debug(hw, ICE_DBG_PTP, "Failed to write to high
> register 0x%08x\n, status %d",
> - high_addr, status);
> - return status;
> - }
> -
> - return ICE_SUCCESS;
> -}
> -
> -/**
> - * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
> - * @hw: pointer to the HW struct
> - * @port: PHY port to read from
> - * @low_addr: offset of the lower register to read from
> - * @val: the contents of the 64bit value to write to PHY
> - *
> - * Write the 64bit value to the two associated 32bit PHY registers. The
> offset
> - * is always specified as the lower register, and the high address is looked
> - * up. This function only operates on registers known to be two parts of
> - * a 64bit value.
> - */
> -static enum ice_status
> -ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
> -{
> - enum ice_status status;
> - u32 low, high;
> - u16 high_addr;
> -
> - /* Only operate on registers known to be split into two 32bit
> - * registers.
> - */
> - if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
> - ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr
> 0x%08x\n",
> - low_addr);
> - return ICE_ERR_PARAM;
> - }
> -
> - low = ICE_LO_DWORD(val);
> - high = ICE_HI_DWORD(val);
> -
> - status = ice_write_phy_reg_e822(hw, port, low_addr, low);
> - if (status) {
> - ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register
> 0x%08x\n, status %d",
> - low_addr, status);
> - return status;
> - }
> -
> - status = ice_write_phy_reg_e822(hw, port, high_addr, high);
> - if (status) {
> - ice_debug(hw, ICE_DBG_PTP, "Failed to write to high
> register 0x%08x\n, status %d",
> - high_addr, status);
> - return status;
> - }
> -
> - return ICE_SUCCESS;
> -}
> -
> -/**
> - * ice_fill_quad_msg_e822 - Fill message data for quad register access
> - * @msg: the PHY message buffer to fill in
> - * @quad: the quad to access
> - * @offset: the register offset
> - *
> - * Fill a message buffer for accessing a register in a quad shared between
> - * multiple PHYs.
> - */
> -static void
> -ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
> -{
> - u32 addr;
> -
> - msg->dest_dev = rmn_0;
> -
> - if ((quad % ICE_NUM_QUAD_TYPE) == 0)
> - addr = Q_0_BASE + offset;
> - else
> - addr = Q_1_BASE + offset;
> -
> - msg->msg_addr_low = ICE_LO_WORD(addr);
> - msg->msg_addr_high = ICE_HI_WORD(addr);
> -}
> -
> -/**
> - * ice_read_quad_reg_e822_lp - Read a PHY quad register
> - * @hw: pointer to the HW struct
> - * @quad: quad to read from
> - * @offset: quad register offset to read
> - * @val: on return, the contents read from the quad
> - * @lock_sbq: true if the sideband queue lock must be acquired
> - *
> - * Read a quad register over the device sideband queue. Quad registers are
> - * shared between multiple PHYs.
> - */
> -static enum ice_status
> -ice_read_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32 *val,
> - bool lock_sbq)
> +static enum ice_status
> +ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
> {
> - struct ice_sbq_msg_input msg = {0};
> enum ice_status status;
> + u32 low, high;
> + u16 high_addr;
>
> - if (quad >= ICE_MAX_QUAD)
> + /* Only operate on registers known to be split into two 32bit
> + * registers.
> + */
> + if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
> + ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr
> 0x%08x\n",
> + low_addr);
> return ICE_ERR_PARAM;
> + }
>
> - ice_fill_quad_msg_e822(&msg, quad, offset);
> - msg.opcode = ice_sbq_msg_rd;
> + status = ice_read_phy_reg_e822(hw, port, low_addr, &low);
> + if (status) {
> + ice_debug(hw, ICE_DBG_PTP, "Failed to read from low
> register 0x%08x\n, status %d",
> + low_addr, status);
> + return status;
> + }
>
> - status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
> + status = ice_read_phy_reg_e822(hw, port, high_addr, &high);
> if (status) {
> - ice_debug(hw, ICE_DBG_PTP, "Failed to send message to
> phy, status %d\n",
> - status);
> + ice_debug(hw, ICE_DBG_PTP, "Failed to read from high
> register 0x%08x\n, status %d",
> + high_addr, status);
> return status;
> }
>
> - *val = msg.data;
> + *val = (u64)high << 32 | low;
>
> return ICE_SUCCESS;
> }
>
> -enum ice_status
> -ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
> -{
> - return ice_read_quad_reg_e822_lp(hw, quad, offset, val, true);
> -}
> -
> /**
> - * ice_write_quad_reg_e822_lp - Write a PHY quad register
> + * ice_write_phy_reg_e822_lp - Write a PHY register
> * @hw: pointer to the HW struct
> - * @quad: quad to write to
> - * @offset: quad register offset to write
> + * @port: PHY port to write to
> + * @offset: PHY register offset to write
> * @val: The value to write to the register
> * @lock_sbq: true if the sideband queue lock must be acquired
> *
> - * Write a quad register over the device sideband queue. Quad registers
> are
> - * shared between multiple PHYs.
> + * Write a PHY register for the given port over the device sideband queue.
> */
> static enum ice_status
> -ice_write_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32
> val,
> - bool lock_sbq)
> +ice_write_phy_reg_e822_lp(struct ice_hw *hw, u8 port, u16 offset, u32 val,
> + bool lock_sbq)
> {
> struct ice_sbq_msg_input msg = {0};
> enum ice_status status;
>
> - if (quad >= ICE_MAX_QUAD)
> - return ICE_ERR_PARAM;
> -
> - ice_fill_quad_msg_e822(&msg, quad, offset);
> + ice_fill_phy_msg_e822(&msg, port, offset);
> msg.opcode = ice_sbq_msg_wr;
> msg.data = val;
>
> @@ -615,84 +693,51 @@ ice_write_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32 val,
>  }
>
> enum ice_status
> -ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
> +ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
> {
> - return ice_write_quad_reg_e822_lp(hw, quad, offset, val, true);
> + return ice_write_phy_reg_e822_lp(hw, port, offset, val, true);
> }
>
> /**
> - * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad
> block
> + * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
> * @hw: pointer to the HW struct
> - * @quad: the quad to read from
> - * @idx: the timestamp index to read
> - * @tstamp: on return, the 40bit timestamp value
> + * @port: port to write to
> + * @low_addr: offset of the low register
> + * @val: 40b value to write
> *
> - * Read a 40bit timestamp value out of the two associated registers in the
> - * quad memory block that is shared between the internal PHYs of the
> E822
> - * family of devices.
> + * Write the provided 40b value to the two associated registers by
> + splitting
> + * it up into two chunks, the lower 8 bits and the upper 32 bits.
> */
> static enum ice_status
> -ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
> +ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
> {
> enum ice_status status;
> - u16 lo_addr, hi_addr;
> - u32 lo, hi;
> -
> - lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
> - hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
> -
> - status = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
> - if (status) {
> - ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP
> timestamp register, status %d\n",
> - status);
> - return status;
> - }
> -
> - status = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
> - if (status) {
> - ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP
> timestamp register, status %d\n",
> - status);
> - return status;
> - }
> + u32 low, high;
> + u16 high_addr;
>
> - /* For E822 based internal PHYs, the timestamp is reported with the
> - * lower 8 bits in the low register, and the upper 32 bits in the high
> - * register.
> + /* Only operate on registers known to be split into a lower 8 bit
> + * register and an upper 32 bit register.
> */
> - *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo &
> TS_PHY_LOW_M);
> -
> - return ICE_SUCCESS;
> -}
> -
> -/**
> - * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
> - * @hw: pointer to the HW struct
> - * @quad: the quad to read from
> - * @idx: the timestamp index to reset
> - *
> - * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
> - * shared between the internal PHYs on the E822 devices.
> - */
> -static enum ice_status
> -ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
> -{
> - enum ice_status status;
> - u16 lo_addr, hi_addr;
> + if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
> + ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr
> 0x%08x\n",
> + low_addr);
> + return ICE_ERR_PARAM;
> + }
>
> - lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
> - hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
> + low = (u32)(val & P_REG_40B_LOW_M);
> + high = (u32)(val >> P_REG_40B_HIGH_S);
>
> - status = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
> + status = ice_write_phy_reg_e822(hw, port, low_addr, low);
> if (status) {
> - ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP
> timestamp register, status %d\n",
> - status);
> + ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register
> 0x%08x\n, status %d",
> + low_addr, status);
> return status;
> }
>
> - status = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
> + status = ice_write_phy_reg_e822(hw, port, high_addr, high);
> if (status) {
> - ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP
> timestamp register, status %d\n",
> - status);
> + ice_debug(hw, ICE_DBG_PTP, "Failed to write to high
> register 0x%08x\n, status %d",
> + high_addr, status);
> return status;
> }
>
> @@ -700,282 +745,236 @@ ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
>  }
>
> /**
> - * ice_read_cgu_reg_e822 - Read a CGU register
> + * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
> * @hw: pointer to the HW struct
> - * @addr: Register address to read
> - * @val: storage for register value read
> + * @port: PHY port to read from
> + * @low_addr: offset of the lower register to read from
> + * @val: the contents of the 64bit value to write to PHY
> *
> - * Read the contents of a register of the Clock Generation Unit. Only
> - * applicable to E822 devices.
> + * Write the 64bit value to the two associated 32bit PHY registers. The
> + offset
> + * is always specified as the lower register, and the high address is
> + looked
> + * up. This function only operates on registers known to be two parts
> + of
> + * a 64bit value.
> */
> static enum ice_status
> -ice_read_cgu_reg_e822(struct ice_hw *hw, u16 addr, u32 *val)
> +ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr,
> +u64 val)
> {
> - struct ice_sbq_msg_input cgu_msg;
> enum ice_status status;
> + u32 low, high;
> + u16 high_addr;
>
> - cgu_msg.opcode = ice_sbq_msg_rd;
> - cgu_msg.dest_dev = cgu;
> - cgu_msg.msg_addr_low = addr;
> - cgu_msg.msg_addr_high = 0x0;
> + /* Only operate on registers known to be split into two 32bit
> + * registers.
> + */
> + if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
> + ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr
> 0x%08x\n",
> + low_addr);
> + return ICE_ERR_PARAM;
> + }
>
> - status = ice_sbq_rw_reg_lp(hw, &cgu_msg, true);
> + low = ICE_LO_DWORD(val);
> + high = ICE_HI_DWORD(val);
> +
> + status = ice_write_phy_reg_e822(hw, port, low_addr, low);
> if (status) {
> - ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register
> 0x%04x, status %d\n",
> - addr, status);
> + ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register
> 0x%08x\n, status %d",
> + low_addr, status);
> return status;
> }
>
> - *val = cgu_msg.data;
> -
> - return status;
> -}
> -
> -/**
> - * ice_write_cgu_reg_e822 - Write a CGU register
> - * @hw: pointer to the HW struct
> - * @addr: Register address to write
> - * @val: value to write into the register
> - *
> - * Write the specified value to a register of the Clock Generation Unit. Only
> - * applicable to E822 devices.
> - */
> -static enum ice_status
> -ice_write_cgu_reg_e822(struct ice_hw *hw, u16 addr, u32 val)
> -{
> - struct ice_sbq_msg_input cgu_msg;
> - enum ice_status status;
> -
> - cgu_msg.opcode = ice_sbq_msg_wr;
> - cgu_msg.dest_dev = cgu;
> - cgu_msg.msg_addr_low = addr;
> - cgu_msg.msg_addr_high = 0x0;
> - cgu_msg.data = val;
> -
> - status = ice_sbq_rw_reg_lp(hw, &cgu_msg, true);
> + status = ice_write_phy_reg_e822(hw, port, high_addr, high);
> if (status) {
> - ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register
> 0x%04x, status %d\n",
> - addr, status);
> + ice_debug(hw, ICE_DBG_PTP, "Failed to write to high
> register 0x%08x\n, status %d",
> + high_addr, status);
> return status;
> }
>
> - return status;
> -}
> -
> -/**
> - * ice_clk_freq_str - Convert time_ref_freq to string
> - * @clk_freq: Clock frequency
> - *
> - * Convert the specified TIME_REF clock frequency to a string.
> - */
> -static const char *ice_clk_freq_str(u8 clk_freq)
> -{
> - switch ((enum ice_time_ref_freq)clk_freq) {
> - case ICE_TIME_REF_FREQ_25_000:
> - return "25 MHz";
> - case ICE_TIME_REF_FREQ_122_880:
> - return "122.88 MHz";
> - case ICE_TIME_REF_FREQ_125_000:
> - return "125 MHz";
> - case ICE_TIME_REF_FREQ_153_600:
> - return "153.6 MHz";
> - case ICE_TIME_REF_FREQ_156_250:
> - return "156.25 MHz";
> - case ICE_TIME_REF_FREQ_245_760:
> - return "245.76 MHz";
> - default:
> - return "Unknown";
> - }
> + return ICE_SUCCESS;
> }
>
> /**
> - * ice_clk_src_str - Convert time_ref_src to string
> - * @clk_src: Clock source
> + * ice_fill_quad_msg_e822 - Fill message data for quad register access
> + * @msg: the PHY message buffer to fill in
> + * @quad: the quad to access
> + * @offset: the register offset
> *
> - * Convert the specified clock source to its string name.
> - */
> -static const char *ice_clk_src_str(u8 clk_src)
> -{
> - switch ((enum ice_clk_src)clk_src) {
> - case ICE_CLK_SRC_TCX0:
> - return "TCX0";
> - case ICE_CLK_SRC_TIME_REF:
> - return "TIME_REF";
> - default:
> - return "Unknown";
> - }
> + * Fill a message buffer for accessing a register in a quad shared
> +between
> + * multiple PHYs.
> + */
> +static void
> +ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
> +{
> + u32 addr;
> +
> + msg->dest_dev = rmn_0;
> +
> + if ((quad % ICE_NUM_QUAD_TYPE) == 0)
> + addr = Q_0_BASE + offset;
> + else
> + addr = Q_1_BASE + offset;
> +
> + msg->msg_addr_low = ICE_LO_WORD(addr);
> + msg->msg_addr_high = ICE_HI_WORD(addr);
> }
>
> /**
> - * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
> + * ice_read_quad_reg_e822_lp - Read a PHY quad register
> * @hw: pointer to the HW struct
> - * @clk_freq: Clock frequency to program
> - * @clk_src: Clock source to select (TIME_REF, or TCX0)
> + * @quad: quad to read from
> + * @offset: quad register offset to read
> + * @val: on return, the contents read from the quad
> + * @lock_sbq: true if the sideband queue lock must be acquired
> *
> - * Configure the Clock Generation Unit with the desired clock frequency
> and
> - * time reference, enabling the PLL which drives the PTP hardware clock.
> + * Read a quad register over the device sideband queue. Quad registers
> + are
> + * shared between multiple PHYs.
> */
> -enum ice_status
> -ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
> - enum ice_clk_src clk_src)
> +static enum ice_status
> +ice_read_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32 *val,
> + bool lock_sbq)
> {
> - union tspll_ro_bwm_lf bwm_lf;
> - union nac_cgu_dword19 dw19;
> - union nac_cgu_dword22 dw22;
> - union nac_cgu_dword24 dw24;
> - union nac_cgu_dword9 dw9;
> + struct ice_sbq_msg_input msg = {0};
> enum ice_status status;
>
> - if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
> - ice_warn(hw, "Invalid TIME_REF frequency %u\n", clk_freq);
> - return ICE_ERR_PARAM;
> - }
> -
> - if (clk_src >= NUM_ICE_CLK_SRC) {
> - ice_warn(hw, "Invalid clock source %u\n", clk_src);
> - return ICE_ERR_PARAM;
> - }
> -
> - if (clk_src == ICE_CLK_SRC_TCX0 &&
> - clk_freq != ICE_TIME_REF_FREQ_25_000) {
> - ice_warn(hw, "TCX0 only supports 25 MHz frequency\n");
> + if (quad >= ICE_MAX_QUAD)
> return ICE_ERR_PARAM;
> - }
> -
> - status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
> - if (status)
> - return status;
>
> - status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24,
> &dw24.val);
> - if (status)
> - return status;
> + ice_fill_quad_msg_e822(&msg, quad, offset);
> + msg.opcode = ice_sbq_msg_rd;
>
> - status = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF,
> &bwm_lf.val);
> - if (status)
> + status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
> + if (status) {
> + ice_debug(hw, ICE_DBG_PTP, "Failed to send message to
> phy, status %d\n",
> + status);
> return status;
> -
> - /* Log the current clock configuration */
> - ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s,
> clk_src %s, clk_freq %s, PLL %s\n",
> - dw24.field.ts_pll_enable ? "enabled" : "disabled",
> - ice_clk_src_str(dw24.field.time_ref_sel),
> - ice_clk_freq_str(dw9.field.time_ref_freq_sel),
> - bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
> -
> - /* Disable the PLL before changing the clock source or frequency */
> - if (dw24.field.ts_pll_enable) {
> - dw24.field.ts_pll_enable = 0;
> -
> - status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24,
> dw24.val);
> - if (status)
> - return status;
> }
>
> - /* Set the frequency */
> - dw9.field.time_ref_freq_sel = clk_freq;
> - status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
> - if (status)
> - return status;
> + *val = msg.data;
>
> - /* Configure the TS PLL feedback divisor */
> - status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19,
> &dw19.val);
> - if (status)
> - return status;
> + return ICE_SUCCESS;
> +}
>
> - dw19.field.tspll_fbdiv_intgr =
> e822_cgu_params[clk_freq].feedback_div;
> - dw19.field.tspll_ndivratio = 1;
> +enum ice_status
> +ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
> +{
> + return ice_read_quad_reg_e822_lp(hw, quad, offset, val, true);
> +}
>
> - status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19,
> dw19.val);
> - if (status)
> - return status;
> +/**
> + * ice_write_quad_reg_e822_lp - Write a PHY quad register
> + * @hw: pointer to the HW struct
> + * @quad: quad to write to
> + * @offset: quad register offset to write
> + * @val: The value to write to the register
> + * @lock_sbq: true if the sideband queue lock must be acquired
> + *
> + * Write a quad register over the device sideband queue. Quad registers
> +are
> + * shared between multiple PHYs.
> + */
> +static enum ice_status
> +ice_write_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32 val,
> + bool lock_sbq)
> +{
> + struct ice_sbq_msg_input msg = {0};
> + enum ice_status status;
>
> - /* Configure the TS PLL post divisor */
> - status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22,
> &dw22.val);
> - if (status)
> - return status;
> + if (quad >= ICE_MAX_QUAD)
> + return ICE_ERR_PARAM;
>
> - dw22.field.time1588clk_div =
> e822_cgu_params[clk_freq].post_pll_div;
> - dw22.field.time1588clk_sel_div2 = 0;
> + ice_fill_quad_msg_e822(&msg, quad, offset);
> + msg.opcode = ice_sbq_msg_wr;
> + msg.data = val;
>
> - status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22,
> dw22.val);
> - if (status)
> + status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
> + if (status) {
> + ice_debug(hw, ICE_DBG_PTP, "Failed to send message to
> phy, status %d\n",
> + status);
> return status;
> + }
>
> - /* Configure the TS PLL pre divisor and clock source */
> - status = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24,
> &dw24.val);
> - if (status)
> - return status;
> + return ICE_SUCCESS;
> +}
>
> - dw24.field.ref1588_ck_div =
> e822_cgu_params[clk_freq].refclk_pre_div;
> - dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
> - dw24.field.time_ref_sel = clk_src;
> +enum ice_status
> +ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
> +{
> + return ice_write_quad_reg_e822_lp(hw, quad, offset, val, true);
> +}
>
> - status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24,
> dw24.val);
> - if (status)
> - return status;
> +/**
> + * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad
> +block
> + * @hw: pointer to the HW struct
> + * @quad: the quad to read from
> + * @idx: the timestamp index to read
> + * @tstamp: on return, the 40bit timestamp value
> + *
> + * Read a 40bit timestamp value out of the two associated registers in
> +the
> + * quad memory block that is shared between the internal PHYs of the
> +E822
> + * family of devices.
> + */
> +static enum ice_status
> +ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
> +{
> + enum ice_status status;
> + u16 lo_addr, hi_addr;
> + u32 lo, hi;
>
> - /* Finally, enable the PLL */
> - dw24.field.ts_pll_enable = 1;
> + lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
> + hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
>
> - status = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24,
> dw24.val);
> - if (status)
> + status = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
> + if (status) {
> + ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP
> timestamp register, status %d\n",
> + status);
> return status;
> + }
>
> - /* Wait to verify if the PLL locks */
> - ice_msec_delay(1, true);
> -
> - status = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF,
> &bwm_lf.val);
> - if (status)
> + status = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
> + if (status) {
> + ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP
> timestamp register, status %d\n",
> + status);
> return status;
> -
> - if (!bwm_lf.field.plllock_true_lock_cri) {
> - ice_warn(hw, "CGU PLL failed to lock\n");
> - return ICE_ERR_NOT_READY;
> }
>
> - /* Log the current clock configuration */
> - ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s,
> clk_src %s, clk_freq %s, PLL %s\n",
> - dw24.field.ts_pll_enable ? "enabled" : "disabled",
> - ice_clk_src_str(dw24.field.time_ref_sel),
> - ice_clk_freq_str(dw9.field.time_ref_freq_sel),
> - bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
> -
> + /* For E822 based internal PHYs, the timestamp is reported with the
> + * lower 8 bits in the low register, and the upper 32 bits in the high
> + * register.
> + */
> + *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo &
> TS_PHY_LOW_M);
>
> return ICE_SUCCESS;
> }
>
> /**
> - * ice_init_cgu_e822 - Initialize CGU with settings from firmware
> - * @hw: pointer to the HW structure
> + * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
> + * @hw: pointer to the HW struct
> + * @quad: the quad to read from
> + * @idx: the timestamp index to reset
> *
> - * Initialize the Clock Generation Unit of the E822 device.
> + * Clear a timestamp, resetting its valid bit, from the PHY quad block
> + that is
> + * shared between the internal PHYs on the E822 devices.
> */
> -static enum ice_status ice_init_cgu_e822(struct ice_hw *hw)
> +static enum ice_status
> +ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
> {
> - struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
> - union tspll_cntr_bist_settings cntr_bist;
> enum ice_status status;
> + u16 lo_addr, hi_addr;
>
> - status = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
> - &cntr_bist.val);
> - if (status)
> - return status;
> -
> - /* Disable sticky lock detection so lock status reported is accurate */
> - cntr_bist.field.i_plllock_sel_0 = 0;
> - cntr_bist.field.i_plllock_sel_1 = 0;
> + lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
> + hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
>
> - status = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
> - cntr_bist.val);
> - if (status)
> + status = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
> + if (status) {
> + ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP
> timestamp register, status %d\n",
> + status);
> return status;
> + }
>
> - /* Configure the CGU PLL using the parameters from the function
> - * capabilities.
> - */
> - status = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
> - (enum ice_clk_src)ts_info->clk_src);
> - if (status)
> + status = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
> + if (status) {
> + ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP
> timestamp register, status %d\n",
> + status);
> return status;
> + }
>
> return ICE_SUCCESS;
> }
> --
> 2.31.1
12/70-19/70
Acked-by: Qiming Yang <qiming.yang@intel.com>
^ permalink raw reply [flat|nested] 149+ messages in thread
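For reference, the 40b helpers moved in the patch above (ice_write_40b_phy_reg_e822 and the
timestamp read path) pack a 40-bit value as the lower 8 bits in one register and the upper
32 bits in another. The following standalone sketch shows that packing; the 0xFF mask and
shift of 8 are assumptions made here for illustration, since the patch text only describes
the split as "lower 8 bits" / "upper 32 bits":

#include <stdint.h>
#include <stdio.h>

/* Assumed split: the patch only states "lower 8 bits" / "upper 32 bits". */
#define DEMO_40B_LOW_M  0xFFULL
#define DEMO_40B_HIGH_S 8

int main(void)
{
	uint64_t val = 0x12345678ABULL;                     /* a 40-bit quantity */
	uint32_t low = (uint32_t)(val & DEMO_40B_LOW_M);    /* 0xAB */
	uint32_t high = (uint32_t)(val >> DEMO_40B_HIGH_S); /* 0x12345678 */
	uint64_t recombined = ((uint64_t)high << DEMO_40B_HIGH_S) |
			      ((uint64_t)low & DEMO_40B_LOW_M);

	printf("low=0x%02x high=0x%08x recombined=0x%010llx\n",
	       (unsigned)low, (unsigned)high, (unsigned long long)recombined);
	return 0;
}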
* [PATCH v2 13/70] net/ice/base: add PHY 56G destination address
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (11 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 12/70] net/ice/base: move code block Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 14/70] net/ice/base: add 56G PHY register definitions Qi Zhang
` (57 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Sergey Temerkhanov
Add PHY 56G destination address. PHY56G is a single device
incorporating all SerDes lanes.
Signed-off-by: Sergey Temerkhanov <sergey.temerkhanov@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_sbq_cmd.h | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/net/ice/base/ice_sbq_cmd.h b/drivers/net/ice/base/ice_sbq_cmd.h
index a5fe43bf26..76c718b252 100644
--- a/drivers/net/ice/base/ice_sbq_cmd.h
+++ b/drivers/net/ice/base/ice_sbq_cmd.h
@@ -48,6 +48,7 @@ struct ice_sbq_evt_desc {
};
enum ice_sbq_msg_dev {
+ phy_56g = 0x02,
rmn_0 = 0x02,
rmn_1 = 0x03,
rmn_2 = 0x04,
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
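A minimal sketch of how the new phy_56g destination is consumed: the raw 56G access
helpers added later in this series (patch 15/70) fill a sideband message with
dest_dev = phy_56g before handing it to ice_sbq_rw_reg_lp(). The structure and opcode
value below are abbreviated stand-ins for ice_sbq_msg_input, used here for illustration
only:

#include <stdint.h>

/* Abbreviated stand-in for ice_sbq_msg_input (illustration only) */
struct demo_sbq_msg {
	uint8_t dest_dev;
	uint8_t opcode;
	uint16_t msg_addr_low;
	uint16_t msg_addr_high;
	uint32_t data;
};

enum { DEMO_SBQ_WR = 1 };          /* placeholder write opcode */
enum { DEMO_DEV_PHY_56G = 0x02 };  /* value introduced by this patch */

/* Fill a write request for a 56G PHY register, mirroring the pattern
 * used by the eth56g raw access helpers in patch 15/70.
 */
void demo_fill_phy56g_write(struct demo_sbq_msg *msg, uint32_t reg_addr,
			    uint32_t val)
{
	msg->opcode = DEMO_SBQ_WR;
	msg->msg_addr_low = (uint16_t)(reg_addr & 0xFFFF);
	msg->msg_addr_high = (uint16_t)(reg_addr >> 16);
	msg->data = val;
	msg->dest_dev = DEMO_DEV_PHY_56G;
}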
* [PATCH v2 14/70] net/ice/base: add 56G PHY register definitions
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (12 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 13/70] net/ice/base: add PHY 56G destination address Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 15/70] net/ice/base: implement 56G PHY access functions Qi Zhang
` (56 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Sergey Temerkhanov
Add 56G PHY register address definitions to facilitate 56G PHY
support.
Signed-off-by: Sergey Temerkhanov <sergey.temerkhanov@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_ptp_hw.h | 75 +++++++++++++++++++++++++++++++
1 file changed, 75 insertions(+)
diff --git a/drivers/net/ice/base/ice_ptp_hw.h b/drivers/net/ice/base/ice_ptp_hw.h
index 9cc3436aa8..ecb79eaea9 100644
--- a/drivers/net/ice/base/ice_ptp_hw.h
+++ b/drivers/net/ice/base/ice_ptp_hw.h
@@ -482,5 +482,80 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define ICE_E810T_SMA_MIN_BIT 3
#define ICE_E810T_SMA_MAX_BIT 7
#define ICE_E810T_P1_OFFSET 8
+/* 56G PHY quad register base addresses */
+#define ICE_PHY0_BASE 0x092000
+#define ICE_PHY1_BASE 0x126000
+#define ICE_PHY2_BASE 0x1BA000
+#define ICE_PHY3_BASE 0x24E000
+#define ICE_PHY4_BASE 0x2E2000
+
+/* Timestamp memory */
+#define PHY_PTP_LANE_ADDR_STEP 0x98
+
+#define PHY_PTP_MEM_START 0x1000
+#define PHY_PTP_MEM_LANE_STEP 0x04A0
+#define PHY_PTP_MEM_LOCATIONS 0x40
+
+/* Number of PHY ports */
+#define ICE_NUM_PHY_PORTS 5
+/* Timestamp PHY incval registers */
+#define PHY_REG_TIMETUS_L 0x8
+#define PHY_REG_TIMETUS_U 0xC
+
+/* Timestamp init registers */
+#define PHY_REG_RX_TIMER_INC_PRE_L 0x64
+#define PHY_REG_RX_TIMER_INC_PRE_U 0x68
+
+#define PHY_REG_TX_TIMER_INC_PRE_L 0x44
+#define PHY_REG_TX_TIMER_INC_PRE_U 0x48
+
+/* Timestamp match and adjust target registers */
+#define PHY_REG_RX_TIMER_CNT_ADJ_L 0x6C
+#define PHY_REG_RX_TIMER_CNT_ADJ_U 0x70
+
+#define PHY_REG_TX_TIMER_CNT_ADJ_L 0x4C
+#define PHY_REG_TX_TIMER_CNT_ADJ_U 0x50
+
+/* Timestamp command registers */
+#define PHY_REG_TX_TMR_CMD 0x40
+#define PHY_REG_RX_TMR_CMD 0x60
+
+/* Phy offset ready registers */
+#define PHY_REG_TX_OFFSET_READY 0x54
+#define PHY_REG_RX_OFFSET_READY 0x74
+/* Phy total offset registers */
+#define PHY_REG_TOTAL_TX_OFFSET_L 0x38
+#define PHY_REG_TOTAL_TX_OFFSET_U 0x3C
+
+#define PHY_REG_TOTAL_RX_OFFSET_L 0x58
+#define PHY_REG_TOTAL_RX_OFFSET_U 0x5C
+
+/* Timestamp capture registers */
+#define PHY_REG_TX_CAPTURE_L 0x78
+#define PHY_REG_TX_CAPTURE_U 0x7C
+
+#define PHY_REG_RX_CAPTURE_L 0x8C
+#define PHY_REG_RX_CAPTURE_U 0x90
+
+/* Memory status registers */
+#define PHY_REG_TX_MEMORY_STATUS_L 0x80
+#define PHY_REG_TX_MEMORY_STATUS_U 0x84
+
+/* Interrupt config register */
+#define PHY_REG_TS_INT_CONFIG 0x88
+
+#define PHY_PTP_INT_STATUS 0x7FD140
+
+#define PHY_TS_INT_CONFIG_THRESHOLD_S 0
+#define PHY_TS_INT_CONFIG_THRESHOLD_M MAKEMASK(0x3F, 0)
+#define PHY_TS_INT_CONFIG_ENA_S 6
+#define PHY_TS_INT_CONFIG_ENA_M BIT(6)
+
+/* Macros to derive offsets for TimeStampLow and TimeStampHigh */
+#define PHY_TSTAMP_L(x) (((x) * 8) + 0)
+#define PHY_TSTAMP_U(x) (((x) * 8) + 4)
+
+#define PHY_REG_REVISION 0x85000
+#define PHY_REVISION_ETH56G 0x10200
#endif /* _ICE_PTP_HW_H_ */
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
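These base and step values are combined into per-lane register addresses by
ice_phy_port_reg_address_eth56g() in the next patch (address = PHY base +
lane * PHY_PTP_LANE_ADDR_STEP + offset). A small worked example follows; the
"four ports per PHY" constant is an assumption for illustration and is not part
of this patch:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PHY0_BASE          0x092000
#define DEMO_PHY1_BASE          0x126000
#define DEMO_PTP_LANE_ADDR_STEP 0x98
#define DEMO_PORTS_PER_PHY      4    /* assumed value, not defined in this patch */

/* Compute a per-lane register address (bounds checks omitted for brevity) */
static uint32_t demo_phy56g_reg_addr(uint8_t port, uint16_t offset)
{
	static const uint32_t base[] = { DEMO_PHY0_BASE, DEMO_PHY1_BASE };
	uint8_t phy = port / DEMO_PORTS_PER_PHY;
	uint8_t lane = port % DEMO_PORTS_PER_PHY;

	return base[phy] + (uint32_t)DEMO_PTP_LANE_ADDR_STEP * lane + offset;
}

int main(void)
{
	/* Port 5 -> PHY 1, lane 1; offset 0x40 is PHY_REG_TX_TMR_CMD */
	printf("0x%06x\n", demo_phy56g_reg_addr(5, 0x40)); /* prints 0x1260d8 */
	return 0;
}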
* [PATCH v2 15/70] net/ice/base: implement 56G PHY access functions
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (13 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 14/70] net/ice/base: add 56G PHY register definitions Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 16/70] net/ice/base: implement 56G PHY setup functions Qi Zhang
` (55 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Sergey Temerkhanov
Implement 56G PHY register and memory read/write functions
to facilitate PTP support.
Signed-off-by: Sergey Temerkhanov <sergey.temerkhanov@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_ptp_hw.c | 1094 +++++++++++++++++++++++++++--
drivers/net/ice/base/ice_ptp_hw.h | 44 +-
drivers/net/ice/base/ice_type.h | 11 +
3 files changed, 1090 insertions(+), 59 deletions(-)
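Several helpers below (ice_ptp_prep_port_adj_eth56g, ice_sync_phy_timer_eth56g) rely on
the port adjustment being applied as 2s-complement addition, so a "negative" adjustment
is simply the unsigned wrap-around of phc_time - phy_time. A small standalone sketch of
that arithmetic, using made-up timer values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t phc_time = 1000;
	uint64_t phy_time = 1500;                   /* PHY is 500 cycles ahead */
	uint64_t difference = phc_time - phy_time;  /* wraps to 2^64 - 500 */

	/* The adjustment registers take the low and high 32-bit halves */
	uint32_t l_time = (uint32_t)(difference & 0xFFFFFFFFULL);
	uint32_t u_time = (uint32_t)(difference >> 32);

	/* Adding the adjustment modulo 2^64 brings the PHY back to the PHC */
	printf("adjusted PHY time = %llu\n",
	       (unsigned long long)(phy_time + difference));
	printf("l_time=0x%08x u_time=0x%08x\n", l_time, u_time);
	return 0;
}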
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index 22d0774dd7..1c5fd799f6 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -395,7 +395,976 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
ice_flush(hw);
}
-/* E822 family functions
+/**
+ * ice_ptp_clean_cmd - Clean the timer command register
+ * @hw: pointer to HW struct
+ *
+ * Zero out the GLTSYN_CMD to avoid any residual command execution.
+ */
+static void ice_ptp_clean_cmd(struct ice_hw *hw)
+{
+ wr32(hw, GLTSYN_CMD, 0);
+ ice_flush(hw);
+}
+
+/* 56G PHY access functions */
+static const u32 eth56g_port_base[ICE_NUM_PHY_PORTS] = {
+ ICE_PHY0_BASE,
+ ICE_PHY1_BASE,
+ ICE_PHY2_BASE,
+ ICE_PHY3_BASE,
+ ICE_PHY4_BASE,
+};
+
+/**
+ * ice_write_phy_eth56g_raw_lp - Write a PHY port register with lock parameter
+ * @hw: pointer to the HW struct
+ * @reg_addr: PHY register address
+ * @val: Value to write
+ * @lock_sbq: true to lock the sideband queue
+ */
+static enum ice_status
+ice_write_phy_eth56g_raw_lp(struct ice_hw *hw, u32 reg_addr, u32 val,
+ bool lock_sbq)
+{
+ struct ice_sbq_msg_input phy_msg;
+ enum ice_status status;
+
+ phy_msg.opcode = ice_sbq_msg_wr;
+
+ phy_msg.msg_addr_low = ICE_LO_WORD(reg_addr);
+ phy_msg.msg_addr_high = ICE_HI_WORD(reg_addr);
+
+ phy_msg.data = val;
+ phy_msg.dest_dev = phy_56g;
+
+ status = ice_sbq_rw_reg_lp(hw, &phy_msg, lock_sbq);
+
+ if (status)
+ ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
+ status);
+
+ return status;
+}
+
+/**
+ * ice_read_phy_eth56g_raw_lp - Read a PHY port register with lock parameter
+ * @hw: pointer to the HW struct
+ * @reg_addr: PHY port register address
+ * @val: Pointer to the value to read (out param)
+ * @lock_sbq: true to lock the sideband queue
+ */
+static enum ice_status
+ice_read_phy_eth56g_raw_lp(struct ice_hw *hw, u32 reg_addr, u32 *val,
+ bool lock_sbq)
+{
+ struct ice_sbq_msg_input phy_msg;
+ enum ice_status status;
+
+ phy_msg.opcode = ice_sbq_msg_rd;
+
+ phy_msg.msg_addr_low = ICE_LO_WORD(reg_addr);
+ phy_msg.msg_addr_high = ICE_HI_WORD(reg_addr);
+
+ phy_msg.dest_dev = phy_56g;
+
+ status = ice_sbq_rw_reg_lp(hw, &phy_msg, lock_sbq);
+
+ if (status)
+ ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
+ status);
+ else
+ *val = phy_msg.data;
+
+ return status;
+}
+
+/**
+ * ice_phy_port_reg_address_eth56g - Calculate a PHY port register address
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @address: The result address
+ */
+static enum ice_status
+ice_phy_port_reg_address_eth56g(u8 port, u16 offset, u32 *address)
+{
+ u8 phy, lane;
+
+ if (port >= ICE_NUM_EXTERNAL_PORTS)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ phy = port / ICE_PORTS_PER_QUAD;
+ lane = port % ICE_PORTS_PER_QUAD;
+
+ *address = offset + eth56g_port_base[phy] +
+ PHY_PTP_LANE_ADDR_STEP * lane;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_write_phy_reg_eth56g_lp - Write a PHY port register with lock parameter
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ * @lock_sbq: true to lock the sideband queue
+ */
+static enum ice_status
+ice_write_phy_reg_eth56g_lp(struct ice_hw *hw, u8 port, u16 offset, u32 val,
+ bool lock_sbq)
+{
+ enum ice_status status;
+ u32 reg_addr;
+
+ status = ice_phy_port_reg_address_eth56g(port, offset, &reg_addr);
+ if (status)
+ return status;
+
+ return ice_write_phy_eth56g_raw_lp(hw, reg_addr, val, lock_sbq);
+}
+
+/**
+ * ice_write_phy_reg_eth56g - Write a PHY port register with sbq locked
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ */
+enum ice_status
+ice_write_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, u32 val)
+{
+ return ice_write_phy_reg_eth56g_lp(hw, port, offset, val, true);
+}
+
+/**
+ * ice_read_phy_reg_eth56g_lp - Read a PHY port register with
+ * lock parameter
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ * @lock_sbq: true to lock the sideband queue
+ */
+static enum ice_status
+ice_read_phy_reg_eth56g_lp(struct ice_hw *hw, u8 port, u16 offset, u32 *val,
+ bool lock_sbq)
+{
+ enum ice_status status;
+ u32 reg_addr;
+
+ status = ice_phy_port_reg_address_eth56g(port, offset, &reg_addr);
+ if (status)
+ return status;
+
+ return ice_read_phy_eth56g_raw_lp(hw, reg_addr, val, lock_sbq);
+}
+
+/**
+ * ice_read_phy_reg_eth56g - Read a PHY port register with sbq locked
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ */
+enum ice_status
+ice_read_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
+{
+ return ice_read_phy_reg_eth56g_lp(hw, port, offset, val, true);
+}
+
+/**
+ * ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
+ * @low_addr: the low address to check
+ *
+ * Checks if the provided low address is one of the known 64bit PHY values
+ * represented as two 32bit registers.
+ */
+static bool ice_is_64b_phy_reg_eth56g(u16 low_addr)
+{
+ switch (low_addr) {
+ case PHY_REG_TX_TIMER_INC_PRE_L:
+ case PHY_REG_RX_TIMER_INC_PRE_L:
+ case PHY_REG_TX_CAPTURE_L:
+ case PHY_REG_RX_CAPTURE_L:
+ case PHY_REG_TOTAL_TX_OFFSET_L:
+ case PHY_REG_TOTAL_RX_OFFSET_L:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_is_40b_phy_reg_eth56g - Check if this is a 40bit PHY register
+ * @low_addr: the low address to check
+ *
+ * Checks if the provided low address is one of the known 40bit PHY values
+ * split into two registers with the lower 8 bits in the low register and the
+ * upper 32 bits in the high register.
+ */
+static bool ice_is_40b_phy_reg_eth56g(u16 low_addr)
+{
+ switch (low_addr) {
+ case PHY_REG_TIMETUS_L:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_read_40b_phy_reg_eth56g - Read a 40bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 40bit value from the PHY registers
+ *
+ * Reads the two registers associated with a 40bit value and returns it in the
+ * val pointer.
+ * This function checks that the caller has specified a known 40 bit register
+ * offset
+ */
+static enum ice_status
+ice_read_40b_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
+{
+ u16 high_addr = low_addr + sizeof(u32);
+ enum ice_status status;
+ u32 lo, hi;
+
+ if (!ice_is_40b_phy_reg_eth56g(low_addr))
+ return ICE_ERR_PARAM;
+
+ status = ice_read_phy_reg_eth56g(hw, port, low_addr, &lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register %#08x\n, status %d",
+ (int)low_addr, status);
+ return status;
+ }
+
+ status = ice_read_phy_reg_eth56g(hw, port, low_addr + sizeof(u32), &hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register %08x\n, status %d",
+ high_addr, status);
+ return status;
+ }
+
+ *val = ((u64)hi << P_REG_40B_HIGH_S) | (lo & P_REG_40B_LOW_M);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_read_64b_phy_reg_eth56g - Read a 64bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 64bit value from the PHY registers
+ *
+ * Reads the two registers associated with a 64bit value and returns it in the
+ * val pointer.
+ * This function checks that the caller has specified a known 64 bit register
+ * offset
+ */
+static enum ice_status
+ice_read_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
+{
+ u16 high_addr = low_addr + sizeof(u32);
+ enum ice_status status;
+ u32 lo, hi;
+
+ if (!ice_is_64b_phy_reg_eth56g(low_addr))
+ return ICE_ERR_PARAM;
+
+ status = ice_read_phy_reg_eth56g(hw, port, low_addr, &lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register %#08x\n, status %d",
+ low_addr, status);
+ return status;
+ }
+
+ status = ice_read_phy_reg_eth56g(hw, port, high_addr, &hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register %#08x\n, status %d",
+ high_addr, status);
+ return status;
+ }
+
+ *val = ((u64)hi << 32) | lo;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_write_40b_phy_reg_eth56g - Write a 40b value to the PHY
+ * @hw: pointer to the HW struct
+ * @port: port to write to
+ * @low_addr: offset of the low register
+ * @val: 40b value to write
+ *
+ * Write the provided 40b value to the two associated registers by splitting
+ * it up into two chunks, the lower 8 bits and the upper 32 bits.
+ * This function checks that the caller has specified a known 40 bit register
+ * offset
+ */
+static enum ice_status
+ice_write_40b_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+{
+ u16 high_addr = low_addr + sizeof(u32);
+ enum ice_status status;
+ u32 lo, hi;
+
+ if (!ice_is_40b_phy_reg_eth56g(low_addr))
+ return ICE_ERR_PARAM;
+
+ lo = (u32)(val & P_REG_40B_LOW_M);
+ hi = (u32)(val >> P_REG_40B_HIGH_S);
+
+ status = ice_write_phy_reg_eth56g(hw, port, low_addr, lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, status %d",
+ low_addr, status);
+ return status;
+ }
+
+ status = ice_write_phy_reg_eth56g(hw, port, high_addr, hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, status %d",
+ high_addr, status);
+ return status;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_write_64b_phy_reg_eth56g - Write a 64bit value to PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: the contents of the 64bit value to write to PHY
+ *
+ * Write the 64bit value to the two associated 32bit PHY registers.
+ * This function checks that the caller has specified a known 64 bit register
+ * offset
+ */
+static enum ice_status
+ice_write_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+{
+ u16 high_addr = low_addr + sizeof(u32);
+ enum ice_status status;
+ u32 lo, hi;
+
+ if (!ice_is_64b_phy_reg_eth56g(low_addr))
+ return ICE_ERR_PARAM;
+
+ lo = ICE_LO_DWORD(val);
+ hi = ICE_HI_DWORD(val);
+
+ status = ice_write_phy_reg_eth56g(hw, port, low_addr, lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, status %d",
+ low_addr, status);
+ return status;
+ }
+
+ status = ice_write_phy_reg_eth56g(hw, port, high_addr, hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, status %d",
+ high_addr, status);
+ return status;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_prep_port_adj_eth56g - Prepare a single port for time adjust
+ * @hw: pointer to HW struct
+ * @port: Port number to be programmed
+ * @time: time in cycles to adjust the port Tx and Rx clocks
+ * @lock_sbq: true to lock the sbq sq_lock (the usual case); false if the
+ * sq_lock has already been locked at a higher level
+ *
+ * Program the port for an atomic adjustment by writing the Tx and Rx timer
+ * registers. The atomic adjustment won't be completed until the driver issues
+ * an ICE_PTP_ADJ_TIME command.
+ *
+ * Note that time is not in units of nanoseconds. It is in clock time
+ * including the lower sub-nanosecond portion of the port timer.
+ *
+ * Negative adjustments are supported using 2s complement arithmetic.
+ */
+enum ice_status
+ice_ptp_prep_port_adj_eth56g(struct ice_hw *hw, u8 port, s64 time,
+ bool lock_sbq)
+{
+ enum ice_status status;
+ u32 l_time, u_time;
+
+ l_time = ICE_LO_DWORD(time);
+ u_time = ICE_HI_DWORD(time);
+
+ /* Tx case */
+ status = ice_write_phy_reg_eth56g_lp(hw, port,
+ PHY_REG_TX_TIMER_INC_PRE_L,
+ l_time, lock_sbq);
+ if (status)
+ goto exit_err;
+
+ status = ice_write_phy_reg_eth56g_lp(hw, port,
+ PHY_REG_TX_TIMER_INC_PRE_U,
+ u_time, lock_sbq);
+ if (status)
+ goto exit_err;
+
+ /* Rx case */
+ status = ice_write_phy_reg_eth56g_lp(hw, port,
+ PHY_REG_RX_TIMER_INC_PRE_L,
+ l_time, lock_sbq);
+ if (status)
+ goto exit_err;
+
+ status = ice_write_phy_reg_eth56g_lp(hw, port,
+ PHY_REG_RX_TIMER_INC_PRE_U,
+ u_time, lock_sbq);
+ if (status)
+ goto exit_err;
+
+ return ICE_SUCCESS;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, status %d\n",
+ port, status);
+ return status;
+}
+
+/**
+ * ice_ptp_read_phy_incval_eth56g - Read a PHY port's current incval
+ * @hw: pointer to the HW struct
+ * @port: the port to read
+ * @incval: on return, the time_clk_cyc incval for this port
+ *
+ * Read the time_clk_cyc increment value for a given PHY port.
+ */
+enum ice_status
+ice_ptp_read_phy_incval_eth56g(struct ice_hw *hw, u8 port, u64 *incval)
+{
+ enum ice_status status;
+
+ status = ice_read_40b_phy_reg_eth56g(hw, port, PHY_REG_TIMETUS_L,
+ incval);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TIMETUS_L, status %d\n",
+ status);
+ return status;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "read INCVAL = 0x%016llx\n",
+ (unsigned long long)*incval);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_read_port_capture_eth56g - Read a port's local time capture
+ * @hw: pointer to HW struct
+ * @port: Port number to read
+ * @tx_ts: on return, the Tx port time capture
+ * @rx_ts: on return, the Rx port time capture
+ *
+ * Read the port's Tx and Rx local time capture values.
+ */
+enum ice_status
+ice_ptp_read_port_capture_eth56g(struct ice_hw *hw, u8 port, u64 *tx_ts,
+ u64 *rx_ts)
+{
+ enum ice_status status;
+
+ /* Tx case */
+ status = ice_read_64b_phy_reg_eth56g(hw, port, PHY_REG_TX_CAPTURE_L,
+ tx_ts);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, status %d\n",
+ status);
+ return status;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "tx_init = %#016llx\n",
+ (unsigned long long)*tx_ts);
+
+ /* Rx case */
+ status = ice_read_64b_phy_reg_eth56g(hw, port, PHY_REG_RX_CAPTURE_L,
+ rx_ts);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, status %d\n",
+ status);
+ return status;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "rx_init = %#016llx\n",
+ (unsigned long long)*rx_ts);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_one_port_cmd_eth56g - Prepare a single PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @port: Port to which cmd has to be sent
+ * @cmd: Command to be sent to the port
+ * @lock_sbq: true if the sideband queue lock must be acquired
+ *
+ * Prepare the requested port for an upcoming timer sync command.
+ */
+enum ice_status
+ice_ptp_one_port_cmd_eth56g(struct ice_hw *hw, u8 port,
+ enum ice_ptp_tmr_cmd cmd, bool lock_sbq)
+{
+ enum ice_status status;
+ u32 cmd_val, val;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+ cmd_val = tmr_idx << SEL_PHY_SRC;
+ switch (cmd) {
+ case ICE_PTP_INIT_TIME:
+ cmd_val |= PHY_CMD_INIT_TIME;
+ break;
+ case ICE_PTP_INIT_INCVAL:
+ cmd_val |= PHY_CMD_INIT_INCVAL;
+ break;
+ case ICE_PTP_ADJ_TIME:
+ cmd_val |= PHY_CMD_ADJ_TIME;
+ break;
+ case ICE_PTP_ADJ_TIME_AT_TIME:
+ cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
+ break;
+ case ICE_PTP_READ_TIME:
+ cmd_val |= PHY_CMD_READ_TIME;
+ break;
+ default:
+ ice_warn(hw, "Unknown timer command %u\n", cmd);
+ return ICE_ERR_PARAM;
+ }
+
+ /* Tx case */
+ /* Read, modify, write */
+ status = ice_read_phy_reg_eth56g_lp(hw, port, PHY_REG_TX_TMR_CMD, &val,
+ lock_sbq);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, status %d\n",
+ status);
+ return status;
+ }
+
+ /* Modify necessary bits only and perform write */
+ val &= ~TS_CMD_MASK;
+ val |= cmd_val;
+
+ status = ice_write_phy_reg_eth56g_lp(hw, port, PHY_REG_TX_TMR_CMD, val,
+ lock_sbq);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, status %d\n",
+ status);
+ return status;
+ }
+
+ /* Rx case */
+ /* Read, modify, write */
+ status = ice_read_phy_reg_eth56g_lp(hw, port, PHY_REG_RX_TMR_CMD, &val,
+ lock_sbq);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, status %d\n",
+ status);
+ return status;
+ }
+
+ /* Modify necessary bits only and perform write */
+ val &= ~TS_CMD_MASK;
+ val |= cmd_val;
+
+ status = ice_write_phy_reg_eth56g_lp(hw, port, PHY_REG_RX_TMR_CMD, val,
+ lock_sbq);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, status %d\n",
+ status);
+ return status;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_port_cmd_eth56g - Prepare all ports for a timer command
+ * @hw: pointer to the HW struct
+ * @cmd: timer command to prepare
+ * @lock_sbq: true if the sideband queue lock must be acquired
+ *
+ * Prepare all ports connected to this device for an upcoming timer sync
+ * command.
+ */
+static enum ice_status
+ice_ptp_port_cmd_eth56g(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd,
+ bool lock_sbq)
+{
+ enum ice_status status;
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ if (!(hw->ena_lports & BIT(port)))
+ continue;
+
+ status = ice_ptp_one_port_cmd_eth56g(hw, port, cmd, lock_sbq);
+ if (status)
+ return status;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_calc_fixed_tx_offset_eth56g - Calculate the fixed Tx offset for a port
+ * @hw: pointer to the HW struct
+ * @link_spd: the Link speed to calculate for
+ *
+ * Calculate the fixed offset due to known static latency data.
+ */
+static u64
+ice_calc_fixed_tx_offset_eth56g(struct ice_hw *hw,
+ enum ice_ptp_link_spd link_spd)
+{
+ u64 fixed_offset = 0;
+ return fixed_offset;
+}
+
+/**
+ * ice_phy_cfg_tx_offset_eth56g - Configure total Tx timestamp offset
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Program the PHY_REG_TOTAL_TX_OFFSET register with the total number of TUs to
+ * adjust Tx timestamps by.
+ *
+ * To avoid overflow, when calculating the offset based on the known static
+ * latency values, we use measurements in 1/100th of a nanosecond, and divide
+ * the TUs per second up front. This avoids overflow while allowing
+ * calculation of the adjustment using integer arithmetic.
+ */
+enum ice_status ice_phy_cfg_tx_offset_eth56g(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd = ICE_PTP_LNK_SPD_10G;
+ enum ice_status status;
+ u64 total_offset;
+
+ total_offset = ice_calc_fixed_tx_offset_eth56g(hw, link_spd);
+
+ /* Now that the total offset has been calculated, program it to the
+ * PHY and indicate that the Tx offset is ready. After this,
+ * timestamps will be enabled.
+ */
+ status = ice_write_64b_phy_reg_eth56g(hw, port,
+ PHY_REG_TOTAL_TX_OFFSET_L,
+ total_offset);
+ if (status)
+ return status;
+
+ return ice_write_phy_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 1);
+}
+
+/**
+ * ice_calc_fixed_rx_offset_eth56g - Calculate the fixed Rx offset for a port
+ * @hw: pointer to HW struct
+ * @link_spd: The Link speed to calculate for
+ *
+ * Determine the fixed Rx latency for a given link speed.
+ */
+static u64
+ice_calc_fixed_rx_offset_eth56g(struct ice_hw *hw,
+ enum ice_ptp_link_spd link_spd)
+{
+ u64 fixed_offset = 0;
+ return fixed_offset;
+}
+
+/**
+ * ice_phy_cfg_rx_offset_eth56g - Configure total Rx timestamp offset
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Program the PHY_REG_TOTAL_RX_OFFSET register with the number of Time Units to
+ * adjust Rx timestamps by. This combines calculations from the Vernier offset
+ * measurements taken in hardware with some data about known fixed delay as
+ * well as adjusting for multi-lane alignment delay.
+ *
+ * This function must be called only after the offset registers are valid,
+ * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
+ * has measured the offset.
+ *
+ * To avoid overflow, when calculating the offset based on the known static
+ * latency values, we use measurements in 1/100th of a nanosecond, and divide
+ * the TUs per second up front. This avoids overflow while allowing
+ * calculation of the adjustment using integer arithmetic.
+ */
+enum ice_status ice_phy_cfg_rx_offset_eth56g(struct ice_hw *hw, u8 port)
+{
+ enum ice_status status;
+ u64 total_offset;
+
+ total_offset = ice_calc_fixed_rx_offset_eth56g(hw, 0);
+
+ /* Now that the total offset has been calculated, program it to the
+ * PHY and indicate that the Rx offset is ready. After this,
+ * timestamps will be enabled.
+ */
+ status = ice_write_64b_phy_reg_eth56g(hw, port,
+ PHY_REG_TOTAL_RX_OFFSET_L,
+ total_offset);
+ if (status)
+ return status;
+
+ return ice_write_phy_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 1);
+}
+
+/**
+ * ice_read_phy_and_phc_time_eth56g - Simultaneously capture PHC and PHY time
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @phy_time: on return, the 64bit PHY timer value
+ * @phc_time: on return, the lower 64bits of PHC time
+ *
+ * Issue an ICE_PTP_READ_TIME timer command to simultaneously capture the PHY
+ * and PHC timer values.
+ */
+static enum ice_status
+ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port, u64 *phy_time,
+ u64 *phc_time)
+{
+ enum ice_status status;
+ u64 tx_time, rx_time;
+ u32 zo, lo;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ /* Prepare the PHC timer for an ICE_PTP_READ_TIME capture command */
+ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
+
+ /* Prepare the PHY timer for an ICE_PTP_READ_TIME capture command */
+ status = ice_ptp_one_port_cmd_eth56g(hw, port, ICE_PTP_READ_TIME, true);
+ if (status)
+ return status;
+
+ /* Issue the sync to start the ICE_PTP_READ_TIME capture */
+ ice_ptp_exec_tmr_cmd(hw);
+ ice_ptp_clean_cmd(hw);
+
+ /* Read the captured PHC time from the shadow time registers */
+ zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ *phc_time = (u64)lo << 32 | zo;
+
+ /* Read the captured PHY time from the PHY shadow registers */
+ status = ice_ptp_read_port_capture_eth56g(hw, port, &tx_time, &rx_time);
+ if (status)
+ return status;
+
+ /* If the PHY Tx and Rx timers don't match, log a warning message.
+ * Note that this should not happen in normal circumstances since the
+ * driver always programs them together.
+ */
+ if (tx_time != rx_time)
+ ice_warn(hw, "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
+ port, (unsigned long long)tx_time,
+ (unsigned long long)rx_time);
+
+ *phy_time = tx_time;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_sync_phy_timer_eth56g - Synchronize the PHY timer with PHC timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to synchronize
+ *
+ * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
+ * This is done by issuing an ICE_PTP_READ_TIME command which triggers a
+ * simultaneous read of the PHY timer and PHC timer. Then we use the
+ * difference to calculate an appropriate 2s complement addition to add
+ * to the PHY timer in order to ensure it reads the same value as the
+ * primary PHC timer.
+ */
+static enum ice_status ice_sync_phy_timer_eth56g(struct ice_hw *hw, u8 port)
+{
+ u64 phc_time, phy_time, difference;
+ enum ice_status status;
+
+ if (!ice_ptp_lock(hw)) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
+ return ICE_ERR_NOT_READY;
+ }
+
+ status = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time,
+ &phc_time);
+ if (status)
+ goto err_unlock;
+
+ /* Calculate the amount required to add to the port time in order for
+ * it to match the PHC time.
+ *
+ * Note that the port adjustment is done using 2s complement
+ * arithmetic. This is convenient since it means that we can simply
+ * calculate the difference between the PHC time and the port time,
+ * and it will be interpreted correctly.
+ */
+
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+ difference = phc_time - phy_time;
+
+ status = ice_ptp_prep_port_adj_eth56g(hw, port, (s64)difference, true);
+ if (status)
+ goto err_unlock;
+
+ status = ice_ptp_one_port_cmd_eth56g(hw, port, ICE_PTP_ADJ_TIME, true);
+ if (status)
+ goto err_unlock;
+
+ /* Issue the sync to activate the time adjustment */
+ ice_ptp_exec_tmr_cmd(hw);
+ ice_ptp_clean_cmd(hw);
+
+ /* Re-capture the timer values to flush the command registers and
+ * verify that the time was properly adjusted.
+ */
+
+ status = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time,
+ &phc_time);
+ if (status)
+ goto err_unlock;
+
+ ice_info(hw, "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
+ port, (unsigned long long)phy_time,
+ (unsigned long long)phc_time);
+
+err_unlock:
+ ice_ptp_unlock(hw);
+ return status;
+}
+
+/**
+ * ice_stop_phy_timer_eth56g - Stop the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to stop
+ * @soft_reset: if true, hold the SOFT_RESET bit of PHY_REG_PS
+ *
+ * Stop the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ */
+enum ice_status
+ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset)
+{
+ enum ice_status status;
+
+ status = ice_write_phy_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 0);
+ if (status)
+ return status;
+
+ status = ice_write_phy_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 0);
+ if (status)
+ return status;
+
+ ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_start_phy_timer_eth56g - Start the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to start
+ * @bypass: unused, for compatibility
+ *
+ * Start the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ */
+enum ice_status
+ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool bypass)
+{
+ enum ice_status status;
+ u32 lo, hi;
+ u64 incval;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ status = ice_stop_phy_timer_eth56g(hw, port, false);
+ if (status)
+ return status;
+
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ incval = (u64)hi << 32 | lo;
+
+ status = ice_write_40b_phy_reg_eth56g(hw, port, PHY_REG_TIMETUS_L,
+ incval);
+ if (status)
+ return status;
+
+ status = ice_ptp_one_port_cmd_eth56g(hw, port, ICE_PTP_INIT_INCVAL,
+ true);
+ if (status)
+ return status;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
+ status = ice_sync_phy_timer_eth56g(hw, port);
+ if (status)
+ return status;
+
+ /* Program the Tx offset */
+ status = ice_phy_cfg_tx_offset_eth56g(hw, port);
+ if (status)
+ return status;
+
+ /* Program the Rx offset */
+ status = ice_phy_cfg_rx_offset_eth56g(hw, port);
+ if (status)
+ return status;
+
+ ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_read_tx_hwtstamp_status_eth56g - Get the current TX timestamp
+ * status mask. Returns the mask of ports where TX timestamps are available
+ * @hw: pointer to the HW struct
+ * @ts_status: the timestamp mask pointer
+ */
+enum ice_status
+ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status)
+{
+ enum ice_status status;
+
+ status = ice_read_phy_eth56g_raw_lp(hw, PHY_PTP_INT_STATUS, ts_status,
+ true);
+ if (status)
+ return status;
+
+ ice_debug(hw, ICE_DBG_PTP, "PHY interrupt status: %x\n", *ts_status);
+
+ return ICE_SUCCESS;
+}
+
+/* ----------------------------------------------------------------------------
+ * E822 family functions
*
* The following functions operate on the E822 family of devices.
*/
@@ -1013,7 +1982,7 @@ static enum ice_status ice_ptp_init_phc_e822(struct ice_hw *hw)
* @time: Time to initialize the PHY port clocks to
*
* Program the PHY port registers with a new initial time value. The port
- * clock will be initialized once the driver issues an INIT_TIME sync
+ * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync
* command. The time value is the upper 32 bits of the PHY timer, usually in
* units of nominal nanoseconds.
*/
@@ -1065,7 +2034,7 @@ ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
*
* Program the port for an atomic adjustment by writing the Tx and Rx timer
* registers. The atomic adjustment won't be completed until the driver issues
- * an ADJ_TIME command.
+ * an ICE_PTP_ADJ_TIME command.
*
* Note that time is not in units of nanoseconds. It is in clock time
* including the lower sub-nanosecond portion of the port timer.
@@ -1121,7 +2090,7 @@ ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time,
*
* Prepare the PHY ports for an atomic time adjustment by programming the PHY
* Tx and Rx port registers. The actual adjustment is completed by issuing an
- * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
+ * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
*/
static enum ice_status
ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj, bool lock_sbq)
@@ -1157,7 +2126,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj, bool lock_sbq)
*
* Prepare each of the PHY ports for a new increment value by programming the
* port's TIMETUS registers. The new increment value will be updated after
- * issuing an INIT_INCVAL command.
+ * issuing an ICE_PTP_INIT_INCVAL command.
*/
static enum ice_status
ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
@@ -1213,7 +2182,7 @@ ice_ptp_read_phy_incval_e822(struct ice_hw *hw, u8 port, u64 *incval)
* @target_time: target time to program
*
* Program the PHY port Tx and Rx TIMER_CNT_ADJ registers used for the
- * ADJ_TIME_AT_TIME command. This should be used in conjunction with
+ * ICE_PTP_ADJ_TIME_AT_TIME command. This should be used in conjunction with
* ice_ptp_prep_phy_adj_e822 to program an atomic adjustment that is
* delayed until a specified target time.
*
@@ -1331,19 +2300,19 @@ ice_ptp_one_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd,
tmr_idx = ice_get_ptp_src_clock_index(hw);
cmd_val = tmr_idx << SEL_PHY_SRC;
switch (cmd) {
- case INIT_TIME:
+ case ICE_PTP_INIT_TIME:
cmd_val |= PHY_CMD_INIT_TIME;
break;
- case INIT_INCVAL:
+ case ICE_PTP_INIT_INCVAL:
cmd_val |= PHY_CMD_INIT_INCVAL;
break;
- case ADJ_TIME:
+ case ICE_PTP_ADJ_TIME:
cmd_val |= PHY_CMD_ADJ_TIME;
break;
- case ADJ_TIME_AT_TIME:
+ case ICE_PTP_ADJ_TIME_AT_TIME:
cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
break;
- case READ_TIME:
+ case ICE_PTP_READ_TIME:
cmd_val |= PHY_CMD_READ_TIME;
break;
default:
@@ -2300,8 +3269,8 @@ ice_phy_cfg_fixed_rx_offset_e822(struct ice_hw *hw, u8 port)
* @phy_time: on return, the 64bit PHY timer value
* @phc_time: on return, the lower 64bits of PHC time
*
- * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC
- * timer values.
+ * Issue an ICE_PTP_READ_TIME timer command to simultaneously capture the PHY
+ * and PHC timer values.
*/
static enum ice_status
ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
@@ -2314,15 +3283,15 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
tmr_idx = ice_get_ptp_src_clock_index(hw);
- /* Prepare the PHC timer for a READ_TIME capture command */
- ice_ptp_src_cmd(hw, READ_TIME);
+ /* Prepare the PHC timer for an ICE_PTP_READ_TIME capture command */
+ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
- /* Prepare the PHY timer for a READ_TIME capture command */
- status = ice_ptp_one_port_cmd_e822(hw, port, READ_TIME, true);
+ /* Prepare the PHY timer for an ICE_PTP_READ_TIME capture command */
+ status = ice_ptp_one_port_cmd_e822(hw, port, ICE_PTP_READ_TIME, true);
if (status)
return status;
- /* Issue the sync to start the READ_TIME capture */
+ /* Issue the sync to start the ICE_PTP_READ_TIME capture */
ice_ptp_exec_tmr_cmd(hw);
/* Read the captured PHC time from the shadow time registers */
@@ -2355,10 +3324,11 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
* @port: the PHY port to synchronize
*
* Perform an adjustment to ensure that the PHY and PHC timers are in sync.
- * This is done by issuing a READ_TIME command which triggers a simultaneous
- * read of the PHY timer and PHC timer. Then we use the difference to
- * calculate an appropriate 2s complement addition to add to the PHY timer in
- * order to ensure it reads the same value as the primary PHC timer.
+ * This is done by issuing an ICE_PTP_READ_TIME command which triggers a
+ * simultaneous read of the PHY timer and PHC timer. Then we use the
+ * difference to calculate an appropriate 2s complement addition to add
+ * to the PHY timer in order to ensure it reads the same value as the
+ * primary PHC timer.
*/
static enum ice_status ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
{
@@ -2388,10 +3358,13 @@ static enum ice_status ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
if (status)
goto err_unlock;
- status = ice_ptp_one_port_cmd_e822(hw, port, ADJ_TIME, true);
+ status = ice_ptp_one_port_cmd_e822(hw, port, ICE_PTP_ADJ_TIME, true);
if (status)
goto err_unlock;
+ /* Init PHC mstr/src cmd for exec during sync */
+ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
+
/* Issue the sync to activate the time adjustment */
ice_ptp_exec_tmr_cmd(hw);
@@ -2513,10 +3486,13 @@ ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass)
if (status)
return status;
- status = ice_ptp_one_port_cmd_e822(hw, port, INIT_INCVAL, true);
+ status = ice_ptp_one_port_cmd_e822(hw, port, ICE_PTP_INIT_INCVAL, true);
if (status)
return status;
+ /* Init PHC mstr/src cmd for exec during sync */
+ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
+
ice_ptp_exec_tmr_cmd(hw);
status = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
@@ -2538,7 +3514,7 @@ ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass)
if (status)
return status;
- status = ice_ptp_one_port_cmd_e822(hw, port, INIT_INCVAL, true);
+ status = ice_ptp_one_port_cmd_e822(hw, port, ICE_PTP_INIT_INCVAL, true);
if (status)
return status;
@@ -2870,7 +3846,7 @@ static enum ice_status ice_ptp_init_phc_e810(struct ice_hw *hw)
*
* Program the PHY port ETH_GLTSYN_SHTIME registers in preparation setting the
* initial clock time. The time will not actually be programmed until the
- * driver issues an INIT_TIME command.
+ * driver issues an ICE_PTP_INIT_TIME command.
*
* The time value is the upper 32 bits of the PHY timer, usually in units of
* nominal nanoseconds.
@@ -2906,7 +3882,7 @@ static enum ice_status ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
*
* Prepare the PHY port for an atomic adjustment by programming the PHY
* ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
- * is completed by issuing an ADJ_TIME sync command.
+ * is completed by issuing an ICE_PTP_ADJ_TIME sync command.
*
* The adjustment value only contains the portion used for the upper 32bits of
* the PHY timer, usually in units of nominal nanoseconds. Negative
@@ -2949,7 +3925,7 @@ ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj, bool lock_sbq)
*
* Prepare the PHY port for a new increment value by programming the PHY
* ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
- * completed by issuing an INIT_INCVAL command.
+ * completed by issuing an ICE_PTP_INIT_INCVAL command.
*/
static enum ice_status
ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
@@ -2987,8 +3963,8 @@ ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
* Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for
* a target time adjust, which will trigger an adjustment of the clock in the
* future. The actual adjustment will occur the next time the PHY port timer
- * crosses over the provided value after the driver issues an ADJ_TIME_AT_TIME
- * command.
+ * crosses over the provided value after the driver issues an
+ * ICE_PTP_ADJ_TIME_AT_TIME command.
*
* The time value is the upper 32 bits of the PHY timer, usually in units of
* nominal nanoseconds.
@@ -3035,19 +4011,19 @@ ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd,
u32 cmd_val, val;
switch (cmd) {
- case INIT_TIME:
+ case ICE_PTP_INIT_TIME:
cmd_val = GLTSYN_CMD_INIT_TIME;
break;
- case INIT_INCVAL:
+ case ICE_PTP_INIT_INCVAL:
cmd_val = GLTSYN_CMD_INIT_INCVAL;
break;
- case ADJ_TIME:
+ case ICE_PTP_ADJ_TIME:
cmd_val = GLTSYN_CMD_ADJ_TIME;
break;
- case ADJ_TIME_AT_TIME:
+ case ICE_PTP_ADJ_TIME_AT_TIME:
cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
break;
- case READ_TIME:
+ case ICE_PTP_READ_TIME:
cmd_val = GLTSYN_CMD_READ_TIME;
break;
default:
@@ -3375,19 +4351,19 @@ void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
cmd_val = tmr_idx << SEL_CPK_SRC;
switch (cmd) {
- case INIT_TIME:
+ case ICE_PTP_INIT_TIME:
cmd_val |= GLTSYN_CMD_INIT_TIME;
break;
- case INIT_INCVAL:
+ case ICE_PTP_INIT_INCVAL:
cmd_val |= GLTSYN_CMD_INIT_INCVAL;
break;
- case ADJ_TIME:
+ case ICE_PTP_ADJ_TIME:
cmd_val |= GLTSYN_CMD_ADJ_TIME;
break;
- case ADJ_TIME_AT_TIME:
+ case ICE_PTP_ADJ_TIME_AT_TIME:
cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
break;
- case READ_TIME:
+ case ICE_PTP_READ_TIME:
cmd_val |= GLTSYN_CMD_READ_TIME;
break;
default:
@@ -3418,10 +4394,19 @@ ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd, bool lock_sbq)
ice_ptp_src_cmd(hw, cmd);
/* Next, prepare the ports */
- if (ice_is_e810(hw))
+ switch (hw->phy_cfg) {
+ case ICE_PHY_ETH56G:
+ status = ice_ptp_port_cmd_eth56g(hw, cmd, lock_sbq);
+ break;
+ case ICE_PHY_E810:
status = ice_ptp_port_cmd_e810(hw, cmd, lock_sbq);
- else
+ break;
+ case ICE_PHY_E822:
status = ice_ptp_port_cmd_e822(hw, cmd, lock_sbq);
+ break;
+ default:
+ status = ICE_ERR_NOT_SUPPORTED;
+ }
if (status) {
ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, status %d\n",
cmd, status);
@@ -3470,7 +4455,7 @@ enum ice_status ice_ptp_init_time(struct ice_hw *hw, u64 time)
if (status)
return status;
- return ice_ptp_tmr_cmd(hw, INIT_TIME, true);
+ return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_TIME, true);
}
/**
@@ -3483,8 +4468,8 @@ enum ice_status ice_ptp_init_time(struct ice_hw *hw, u64 time)
*
* 1) Write the increment value to the source timer shadow registers
* 2) Write the increment value to the PHY timer shadow registers
- * 3) Issue an INIT_INCVAL timer command to synchronously switch both the
- * source and port timers to the new increment value at the next clock
+ * 3) Issue an ICE_PTP_INIT_INCVAL timer command to synchronously switch both
+ * the source and port timers to the new increment value at the next clock
* cycle.
*/
enum ice_status ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
@@ -3505,7 +4490,7 @@ enum ice_status ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
if (status)
return status;
- return ice_ptp_tmr_cmd(hw, INIT_INCVAL, true);
+ return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_INCVAL, true);
}
/**
@@ -3541,8 +4526,8 @@ enum ice_status ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
*
* 1) Write the adjustment to the source timer shadow registers
* 2) Write the adjustment to the PHY timer shadow registers
- * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to
- * both the source and port timers at the next clock cycle.
+ * 3) Issue an ICE_PTP_ADJ_TIME timer command to synchronously apply the
+ * adjustment to both the source and port timers at the next clock cycle.
*/
enum ice_status ice_ptp_adj_clock(struct ice_hw *hw, s32 adj, bool lock_sbq)
{
@@ -3566,7 +4551,7 @@ enum ice_status ice_ptp_adj_clock(struct ice_hw *hw, s32 adj, bool lock_sbq)
if (status)
return status;
- return ice_ptp_tmr_cmd(hw, ADJ_TIME, lock_sbq);
+ return ice_ptp_tmr_cmd(hw, ICE_PTP_ADJ_TIME, lock_sbq);
}
/**
@@ -3582,7 +4567,8 @@ enum ice_status ice_ptp_adj_clock(struct ice_hw *hw, s32 adj, bool lock_sbq)
* 2) Write the target time to the source timer shadow time registers
* 3) Write the adjustment to the PHY timers shadow adjust registers
* 4) Write the target time to the PHY timers shadow adjust registers
- * 5) Issue an ADJ_TIME_AT_TIME command to initiate the atomic adjustment.
+ * 5) Issue an ICE_PTP_ADJ_TIME_AT_TIME command to initiate the atomic
+ * adjustment.
*/
enum ice_status
ice_ptp_adj_clock_at_time(struct ice_hw *hw, u64 at_time, s32 adj)
@@ -3596,9 +4582,9 @@ ice_ptp_adj_clock_at_time(struct ice_hw *hw, u64 at_time, s32 adj)
time_hi = ICE_HI_DWORD(at_time);
/* Write the desired clock adjustment into the GLTSYN_SHADJ register.
- * For an ADJ_TIME_AT_TIME command, this set of registers represents
- * the value to add to the clock time. It supports subtraction by
- * interpreting the value as a 2's complement integer.
+ * For an ICE_PTP_ADJ_TIME_AT_TIME command, this set of registers
+ * represents the value to add to the clock time. It supports
+ * subtraction by interpreting the value as a 2's complement integer.
*/
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
@@ -3624,7 +4610,7 @@ ice_ptp_adj_clock_at_time(struct ice_hw *hw, u64 at_time, s32 adj)
if (status)
return status;
- return ice_ptp_tmr_cmd(hw, ADJ_TIME_AT_TIME, true);
+ return ice_ptp_tmr_cmd(hw, ICE_PTP_ADJ_TIME_AT_TIME, true);
}
/**
diff --git a/drivers/net/ice/base/ice_ptp_hw.h b/drivers/net/ice/base/ice_ptp_hw.h
index ecb79eaea9..a030a9d4ed 100644
--- a/drivers/net/ice/base/ice_ptp_hw.h
+++ b/drivers/net/ice/base/ice_ptp_hw.h
@@ -6,11 +6,12 @@
#define _ICE_PTP_HW_H_
enum ice_ptp_tmr_cmd {
- INIT_TIME,
- INIT_INCVAL,
- ADJ_TIME,
- ADJ_TIME_AT_TIME,
- READ_TIME
+ ICE_PTP_INIT_TIME,
+ ICE_PTP_INIT_INCVAL,
+ ICE_PTP_ADJ_TIME,
+ ICE_PTP_ADJ_TIME_AT_TIME,
+ ICE_PTP_READ_TIME,
+ ICE_PTP_NOP,
};
enum ice_ptp_serdes {
@@ -232,6 +233,39 @@ enum ice_status ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
enum ice_status ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
bool ice_is_pca9575_present(struct ice_hw *hw);
+void
+ice_ptp_process_cgu_err(struct ice_hw *hw, struct ice_rq_event_info *event);
+/* ETH56G family functions */
+enum ice_status
+ice_read_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, u32 *val);
+enum ice_status
+ice_write_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, u32 val);
+enum ice_status
+ice_read_phy_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset, u32 *val);
+enum ice_status
+ice_write_phy_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset, u32 val);
+
+enum ice_status
+ice_ptp_prep_port_adj_eth56g(struct ice_hw *hw, u8 port, s64 time,
+ bool lock_sbq);
+
+enum ice_status
+ice_ptp_read_phy_incval_eth56g(struct ice_hw *hw, u8 port, u64 *incval);
+enum ice_status
+ice_ptp_read_port_capture_eth56g(struct ice_hw *hw, u8 port,
+ u64 *tx_ts, u64 *rx_ts);
+enum ice_status
+ice_ptp_one_port_cmd_eth56g(struct ice_hw *hw, u8 port,
+ enum ice_ptp_tmr_cmd cmd, bool lock_sbq);
+enum ice_status
+ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status);
+enum ice_status
+ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset);
+enum ice_status
+ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool bypass);
+enum ice_status ice_phy_cfg_tx_offset_eth56g(struct ice_hw *hw, u8 port);
+enum ice_status ice_phy_cfg_rx_offset_eth56g(struct ice_hw *hw, u8 port);
+
#define PFTSYN_SEM_BYTES 4
#define ICE_PTP_CLOCK_INDEX_0 0x00
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index 15b12bfc8d..a17accff19 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -1135,6 +1135,13 @@ struct ice_switch_info {
ice_declare_bitmap(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
+/* PHY configuration */
+enum ice_phy_cfg {
+ ICE_PHY_E810 = 1,
+ ICE_PHY_E822,
+ ICE_PHY_ETH56G,
+};
+
/* Port hardware description */
struct ice_hw {
u8 *hw_addr;
@@ -1159,6 +1166,7 @@ struct ice_hw {
u8 revision_id;
u8 pf_id; /* device profile info */
+ enum ice_phy_cfg phy_cfg;
u8 logical_pf_id;
u16 max_burst_size; /* driver sets this value */
@@ -1233,6 +1241,9 @@ struct ice_hw {
#define ICE_PORTS_PER_PHY 8
#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY
+ /* bitmap of enabled logical ports */
+ u32 ena_lports;
+
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
u32 pkg_seg_id;
--
2.31.1
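A note on the two's complement port adjustment used by ice_sync_phy_timer_eth56g()
above: the difference is computed with plain unsigned 64-bit wrap-around
arithmetic, so a PHY timer that is ahead of the PHC naturally yields a value
the hardware interprets as a negative offset. A standalone sketch of the idea,
using standard integer types instead of the driver's u64/s64 (the helper name
is made up; illustrative only, not part of the patch):

#include <stdint.h>

/* phc_time/phy_time are the values captured by the ICE_PTP_READ_TIME
 * command. The subtraction wraps modulo 2^64; reinterpreting the result
 * as signed gives the two's complement adjustment that is handed to
 * ice_ptp_prep_port_adj_eth56g().
 */
static inline int64_t example_port_adjustment(uint64_t phc_time,
                                              uint64_t phy_time)
{
    uint64_t difference = phc_time - phy_time;

    return (int64_t)difference;
}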
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 16/70] net/ice/base: implement 56G PHY setup functions
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (14 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 15/70] net/ice/base: implement 56G PHY access functions Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 17/70] net/ice/base: work around missing PTP caps Qi Zhang
` (54 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Sergey Temerkhanov
Implement setup functions for the 56G PHY Simics model
Signed-off-by: Sergey Temerkhanov <sergey.temerkhanov@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_ptp_hw.c | 29 +++++++++++++++++++++++++++++
drivers/net/ice/base/ice_ptp_hw.h | 2 ++
2 files changed, 31 insertions(+)
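The new ice_ptp_init_phy_cfg() below is meant to be called once, early in PTP
initialization, so that the rest of the code can branch on hw->phy_cfg. A
minimal usage sketch (the caller name and call site are assumptions, not part
of the patch):

/* Detect the PHY model once, then branch on hw->phy_cfg afterwards. */
static enum ice_status example_detect_phy(struct ice_hw *hw)
{
    enum ice_status status;

    status = ice_ptp_init_phy_cfg(hw);
    if (status)
        return status;

    if (hw->phy_cfg == ICE_PHY_ETH56G)
        ice_debug(hw, ICE_DBG_PTP, "56G PHY detected\n");

    return ICE_SUCCESS;
}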
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index 1c5fd799f6..093331331d 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -1363,6 +1363,35 @@ ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status)
return ICE_SUCCESS;
}
+/**
+ * ice_ptp_init_phy_cfg - Initialize the PHY configuration type
+ * @hw: pointer to the HW struct
+ *
+ * Detect which PHY model the device uses (ETH56G, E810 or E822) and record
+ * it in hw->phy_cfg so that later calls can dispatch to the matching family
+ * of functions.
+ */
+enum ice_status
+ice_ptp_init_phy_cfg(struct ice_hw *hw)
+{
+ enum ice_status status;
+ u32 phy_rev;
+
+ status = ice_read_phy_eth56g_raw_lp(hw, PHY_REG_REVISION, &phy_rev,
+ true);
+ if (status)
+ return status;
+
+ if (phy_rev == PHY_REVISION_ETH56G) {
+ hw->phy_cfg = ICE_PHY_ETH56G;
+ return ICE_SUCCESS;
+ }
+
+ if (ice_is_e810(hw))
+ hw->phy_cfg = ICE_PHY_E810;
+ else
+ hw->phy_cfg = ICE_PHY_E822;
+
+ return ICE_SUCCESS;
+}
+
/* ----------------------------------------------------------------------------
* E822 family functions
*
diff --git a/drivers/net/ice/base/ice_ptp_hw.h b/drivers/net/ice/base/ice_ptp_hw.h
index a030a9d4ed..1e016ef177 100644
--- a/drivers/net/ice/base/ice_ptp_hw.h
+++ b/drivers/net/ice/base/ice_ptp_hw.h
@@ -266,6 +266,8 @@ ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool bypass);
enum ice_status ice_phy_cfg_tx_offset_eth56g(struct ice_hw *hw, u8 port);
enum ice_status ice_phy_cfg_rx_offset_eth56g(struct ice_hw *hw, u8 port);
+enum ice_status ice_ptp_init_phy_cfg(struct ice_hw *hw);
+
#define PFTSYN_SEM_BYTES 4
#define ICE_PTP_CLOCK_INDEX_0 0x00
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 17/70] net/ice/base: work around missing PTP caps
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (15 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 16/70] net/ice/base: implement 56G PHY setup functions Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 18/70] net/ice/base: enable calling of ETH56G functions Qi Zhang
` (53 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Sergey Temerkhanov
Provide a workaround for missing PTP caps on Simics; this code shall be
removed after cap reporting is fixed.
Signed-off-by: Sergey Temerkhanov <sergey.temerkhanov@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_common.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
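The effect of the workaround, in miniature: the capability word is no longer
read from the firmware capability element but hard-coded with every tested bit
set, so fields such as info->ena always parse as enabled. A stripped-down
sketch of what the hunk below amounts to (the helper name is made up and the
real parser does more than this):

static void example_forced_1588_caps(struct ice_hw_func_caps *func_p)
{
    struct ice_ts_func_info *info = &func_p->ts_func_info;
    u32 number = ICE_TS_FUNC_ENA_M | ICE_TS_SRC_TMR_OWND_M |
                 ICE_TS_TMR_ENA_M | ICE_TS_TMR_IDX_OWND_M |
                 ICE_TS_TMR_IDX_ASSOC_M;

    /* With every tested bit forced on, this is always true */
    info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
    func_p->common_cap.ieee_1588 = info->ena;
}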
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index e22600c46d..cedce2dcf5 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -2516,7 +2516,12 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
struct ice_aqc_list_caps_elem *cap)
{
struct ice_ts_func_info *info = &func_p->ts_func_info;
- u32 number = LE32_TO_CPU(cap->number);
+ u32 number = ICE_TS_FUNC_ENA_M | ICE_TS_SRC_TMR_OWND_M |
+ ICE_TS_TMR_ENA_M | ICE_TS_TMR_IDX_OWND_M |
+ ICE_TS_TMR_IDX_ASSOC_M;
+ u8 clk_freq;
+
+ ice_debug(hw, ICE_DBG_INIT, "1588 func caps: raw value %x\n", number);
info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
func_p->common_cap.ieee_1588 = info->ena;
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 18/70] net/ice/base: enable calling of ETH56G functions
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (16 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 17/70] net/ice/base: work around missing PTP caps Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 19/70] net/ice/base: fix PHY type 10G SFI C2C to media type mapping Qi Zhang
` (52 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Sergey Temerkhanov
Enable calling of ETH56G functions in the base code when the
appropriate PHY has been detected
Signed-off-by: Sergey Temerkhanov <sergey.temerkhanov@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_ptp_hw.c | 523 ++++++++++++++++++++++++++++--
1 file changed, 498 insertions(+), 25 deletions(-)
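One detail of the new ice_read_phy_tstamp_eth56g() below is worth spelling
out: the 40-bit timestamp is split across two 32-bit port-memory words, with
the lower 8 bits in the low word and the upper 32 bits in the high word. A
standalone sketch of the reassembly, where the literal shift and mask stand in
for TS_PHY_HIGH_S and TS_PHY_LOW_M (assumed values, shown only for
illustration):

#include <stdint.h>

/* hi/lo are the two port-memory words; the result is the 40-bit timestamp. */
static inline uint64_t example_assemble_ts40(uint32_t hi, uint32_t lo)
{
    return ((uint64_t)hi << 8) | ((uint64_t)lo & 0xFFull);
}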
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index 093331331d..1fb0c57a8c 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -502,6 +502,29 @@ ice_phy_port_reg_address_eth56g(u8 port, u16 offset, u32 *address)
return ICE_SUCCESS;
}
+/**
+ * ice_phy_port_mem_address_eth56g - Calculate a PHY port memory address
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @address: The result address
+ */
+static enum ice_status
+ice_phy_port_mem_address_eth56g(u8 port, u16 offset, u32 *address)
+{
+ u8 phy, lane;
+
+ if (port >= ICE_NUM_EXTERNAL_PORTS)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ phy = port / ICE_PORTS_PER_QUAD;
+ lane = port % ICE_PORTS_PER_QUAD;
+
+ *address = offset + eth56g_port_base[phy] +
+ PHY_PTP_MEM_START + PHY_PTP_MEM_LANE_STEP * lane;
+
+ return ICE_SUCCESS;
+}
+
/**
* ice_write_phy_reg_eth56g_lp - Write a PHY port register with lock parameter
* @hw: pointer to the HW struct
@@ -573,6 +596,80 @@ ice_read_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
return ice_read_phy_reg_eth56g_lp(hw, port, offset, val, true);
}
+/**
+ * ice_phy_port_mem_read_eth56g_lp - Read a PHY port memory location
+ * with lock parameter
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ * @lock_sbq: true to lock the sideband queue
+ */
+static enum ice_status
+ice_phy_port_mem_read_eth56g_lp(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val, bool lock_sbq)
+{
+ enum ice_status status;
+ u32 mem_addr;
+
+ status = ice_phy_port_mem_address_eth56g(port, offset, &mem_addr);
+ if (status)
+ return status;
+
+ return ice_read_phy_eth56g_raw_lp(hw, mem_addr, val, lock_sbq);
+}
+
+/**
+ * ice_phy_port_mem_read_eth56g - Read a PHY port memory location with
+ * sbq locked
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ */
+static enum ice_status
+ice_phy_port_mem_read_eth56g(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
+{
+ return ice_phy_port_mem_read_eth56g_lp(hw, port, offset, val, true);
+}
+
+/**
+ * ice_phy_port_mem_write_eth56g_lp - Write a PHY port memory location with
+ * lock parameter
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ * @lock_sbq: true to lock the sideband queue
+ */
+static enum ice_status
+ice_phy_port_mem_write_eth56g_lp(struct ice_hw *hw, u8 port, u16 offset,
+ u32 val, bool lock_sbq)
+{
+ enum ice_status status;
+ u32 mem_addr;
+
+ status = ice_phy_port_mem_address_eth56g(port, offset, &mem_addr);
+ if (status)
+ return status;
+
+ return ice_write_phy_eth56g_raw_lp(hw, mem_addr, val, lock_sbq);
+}
+
+/**
+ * ice_phy_port_mem_write_eth56g - Write a PHY port memory location with
+ * sbq locked
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ */
+static enum ice_status
+ice_phy_port_mem_write_eth56g(struct ice_hw *hw, u8 port, u16 offset, u32 val)
+{
+ return ice_phy_port_mem_write_eth56g_lp(hw, port, offset, val, true);
+}
+
/**
* ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
* @low_addr: the low address to check
@@ -778,6 +875,140 @@ ice_write_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
return ICE_SUCCESS;
}
+/**
+ * ice_read_phy_tstamp_eth56g - Read a PHY timestamp out of the port memory
+ * @hw: pointer to the HW struct
+ * @port: the port to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the two associated entries in the
+ * port memory block of the internal PHYs of the 56G devices.
+ */
+static enum ice_status
+ice_read_phy_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx, u64 *tstamp)
+{
+ enum ice_status status;
+ u16 lo_addr, hi_addr;
+ u32 lo, hi;
+
+ lo_addr = (u16)PHY_TSTAMP_L(idx);
+ hi_addr = (u16)PHY_TSTAMP_U(idx);
+
+ status = ice_phy_port_mem_read_eth56g(hw, port, lo_addr, &lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_phy_port_mem_read_eth56g(hw, port, hi_addr, &hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n",
+ status);
+ return status;
+ }
+
+ /* For 56G based internal PHYs, the timestamp is reported with the
+ * lower 8 bits in the low register, and the upper 32 bits in the high
+ * register.
+ */
+ *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_clear_phy_tstamp_eth56g - Clear a timestamp from the quad block
+ * @hw: pointer to the HW struct
+ * @port: the quad to read from
+ * @idx: the timestamp index to reset
+ *
+ * Clear a timestamp, resetting its valid bit, in the PHY port memory of
+ * internal PHYs of the 56G devices.
+ */
+static enum ice_status
+ice_clear_phy_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx)
+{
+ enum ice_status status;
+ u16 lo_addr;
+
+ lo_addr = (u16)PHY_TSTAMP_L(idx);
+
+ status = ice_phy_port_mem_write_eth56g(hw, port, lo_addr, 0);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, status %d\n",
+ status);
+ return status;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptp_prep_port_phy_time_eth56g - Prepare one PHY port with initial time
+ * @hw: pointer to the HW struct
+ * @port: port number
+ * @phy_time: time to initialize the PHY port clocks to
+ *
+ * Write a new initial time value into registers of a specific PHY port.
+ */
+static enum ice_status
+ice_ptp_prep_port_phy_time_eth56g(struct ice_hw *hw, u8 port, u64 phy_time)
+{
+ enum ice_status status;
+
+ /* Tx case */
+ status = ice_write_64b_phy_reg_eth56g(hw, port,
+ PHY_REG_TX_TIMER_INC_PRE_L,
+ phy_time);
+ if (status)
+ return status;
+
+ /* Rx case */
+ return ice_write_64b_phy_reg_eth56g(hw, port,
+ PHY_REG_RX_TIMER_INC_PRE_L,
+ phy_time);
+}
+
+/**
+ * ice_ptp_prep_phy_time_eth56g - Prepare PHY port with initial time
+ * @hw: pointer to the HW struct
+ * @time: Time to initialize the PHY port clocks to
+ *
+ * Program the PHY port registers with a new initial time value. The port
+ * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync
+ * command. The time value is the upper 32 bits of the PHY timer, usually in
+ * units of nominal nanoseconds.
+ */
+static enum ice_status
+ice_ptp_prep_phy_time_eth56g(struct ice_hw *hw, u32 time)
+{
+ enum ice_status status;
+ u64 phy_time;
+ u8 port;
+
+ /* The time represents the upper 32 bits of the PHY timer, so we need
+ * to shift to account for this when programming.
+ */
+ phy_time = (u64)time << 32;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ if (!(hw->ena_lports & BIT(port)))
+ continue;
+ status = ice_ptp_prep_port_phy_time_eth56g(hw, port,
+ phy_time);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, status %d\n",
+ port, status);
+ return status;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
/**
* ice_ptp_prep_port_adj_eth56g - Prepare a single port for time adjust
* @hw: pointer to HW struct
@@ -839,6 +1070,74 @@ ice_ptp_prep_port_adj_eth56g(struct ice_hw *hw, u8 port, s64 time,
return status;
}
+/**
+ * ice_ptp_prep_phy_adj_eth56g - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment in nanoseconds
+ * @lock_sbq: true to lock the sbq sq_lock (the usual case); false if the
+ * sq_lock has already been locked at a higher level
+ *
+ * Prepare the PHY ports for an atomic time adjustment by programming the PHY
+ * Tx and Rx port registers. The actual adjustment is completed by issuing an
+ * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
+ */
+static enum ice_status
+ice_ptp_prep_phy_adj_eth56g(struct ice_hw *hw, s32 adj, bool lock_sbq)
+{
+ enum ice_status status = ICE_SUCCESS;
+ s64 cycles;
+ u8 port;
+
+ /* The port clock supports adjustment of the sub-nanosecond portion of
+ * the clock. We shift the provided adjustment in nanoseconds to
+ * calculate the appropriate adjustment to program into the PHY ports.
+ */
+ cycles = (s64)adj << 32;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ if (!(hw->ena_lports & BIT(port)))
+ continue;
+
+ status = ice_ptp_prep_port_adj_eth56g(hw, port, cycles,
+ lock_sbq);
+ if (status)
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * ice_ptp_prep_phy_incval_eth56g - Prepare PHY ports for time adjustment
+ * @hw: pointer to HW struct
+ * @incval: new increment value to prepare
+ *
+ * Prepare each of the PHY ports for a new increment value by programming the
+ * port's TIMETUS registers. The new increment value will be updated after
+ * issuing an ICE_PTP_INIT_INCVAL command.
+ */
+static enum ice_status
+ice_ptp_prep_phy_incval_eth56g(struct ice_hw *hw, u64 incval)
+{
+ enum ice_status status;
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ if (!(hw->ena_lports & BIT(port)))
+ continue;
+ status = ice_write_40b_phy_reg_eth56g(hw, port,
+ PHY_REG_TIMETUS_L,
+ incval);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, status %d\n",
+ port, status);
+ return status;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
/**
* ice_ptp_read_phy_incval_eth56g - Read a PHY port's current incval
* @hw: pointer to the HW struct
@@ -866,6 +1165,67 @@ ice_ptp_read_phy_incval_eth56g(struct ice_hw *hw, u8 port, u64 *incval)
return ICE_SUCCESS;
}
+/**
+ * ice_ptp_prep_phy_adj_target_eth56g - Prepare PHY for adjust at target time
+ * @hw: pointer to HW struct
+ * @target_time: target time to program
+ *
+ * Program the PHY port Tx and Rx TIMER_CNT_ADJ registers used for the
+ * ICE_PTP_ADJ_TIME_AT_TIME command. This should be used in conjunction with
+ * ice_ptp_prep_phy_adj_eth56g to program an atomic adjustment that is
+ * delayed until a specified target time.
+ *
+ * Note that a target time adjustment is not currently supported on E810
+ * devices.
+ */
+static enum ice_status
+ice_ptp_prep_phy_adj_target_eth56g(struct ice_hw *hw, u32 target_time)
+{
+ enum ice_status status;
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ if (!(hw->ena_lports & BIT(port)))
+ continue;
+
+ /* Tx case */
+ /* No sub-nanoseconds data */
+ status = ice_write_phy_reg_eth56g_lp(hw, port,
+ PHY_REG_TX_TIMER_CNT_ADJ_L,
+ 0, true);
+ if (status)
+ goto exit_err;
+
+ status = ice_write_phy_reg_eth56g_lp(hw, port,
+ PHY_REG_TX_TIMER_CNT_ADJ_U,
+ target_time, true);
+ if (status)
+ goto exit_err;
+
+ /* Rx case */
+ /* No sub-nanoseconds data */
+ status = ice_write_phy_reg_eth56g_lp(hw, port,
+ PHY_REG_RX_TIMER_CNT_ADJ_L,
+ 0, true);
+ if (status)
+ goto exit_err;
+
+ status = ice_write_phy_reg_eth56g_lp(hw, port,
+ PHY_REG_RX_TIMER_CNT_ADJ_U,
+ target_time, true);
+ if (status)
+ goto exit_err;
+ }
+
+ return ICE_SUCCESS;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write target time for port %u, status %d\n",
+ port, status);
+
+ return status;
+}
+
/**
* ice_ptp_read_port_capture_eth56g - Read a port's local time capture
* @hw: pointer to HW struct
@@ -1342,6 +1702,31 @@ ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool bypass)
return ICE_SUCCESS;
}
+/**
+ * ice_ptp_init_phc_eth56g - Perform ETH56G specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform PHC initialization steps specific to ETH56G devices: enable
+ * sideband access to the switch and PHY registers and initialize the
+ * Clock Generation Unit.
+ */
+static enum ice_status ice_ptp_init_phc_eth56g(struct ice_hw *hw)
+{
+ enum ice_status status = ICE_SUCCESS;
+ u32 regval;
+
+ /* Enable reading switch and PHY registers over the sideband queue */
+#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
+#define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
+ regval = rd32(hw, PF_SB_REM_DEV_CTL);
+ regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
+ PF_SB_REM_DEV_CTL_PHY0);
+ wr32(hw, PF_SB_REM_DEV_CTL, regval);
+
+ /* Initialize the Clock Generation Unit */
+ status = ice_init_cgu_e822(hw);
+
+ return status;
+}
+
/**
* ice_ptp_read_tx_hwtstamp_status_eth56g - Get the current TX timestamp
* status mask. Returns the mask of ports where TX timestamps are available
@@ -4477,10 +4862,20 @@ enum ice_status ice_ptp_init_time(struct ice_hw *hw, u64 time)
/* PHY Clks */
/* Fill Rx and Tx ports and send msg to PHY */
- if (ice_is_e810(hw))
+ switch (hw->phy_cfg) {
+ case ICE_PHY_ETH56G:
+ status = ice_ptp_prep_phy_time_eth56g(hw, time & 0xFFFFFFFF);
+ break;
+ case ICE_PHY_E810:
status = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
- else
+ break;
+ case ICE_PHY_E822:
status = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
+ break;
+ default:
+ status = ICE_ERR_NOT_SUPPORTED;
+ }
+
if (status)
return status;
@@ -4512,10 +4907,20 @@ enum ice_status ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), ICE_LO_DWORD(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), ICE_HI_DWORD(incval));
- if (ice_is_e810(hw))
+ switch (hw->phy_cfg) {
+ case ICE_PHY_ETH56G:
+ status = ice_ptp_prep_phy_incval_eth56g(hw, incval);
+ break;
+ case ICE_PHY_E810:
status = ice_ptp_prep_phy_incval_e810(hw, incval);
- else
+ break;
+ case ICE_PHY_E822:
status = ice_ptp_prep_phy_incval_e822(hw, incval);
+ break;
+ default:
+ status = ICE_ERR_NOT_SUPPORTED;
+ }
+
if (status)
return status;
@@ -4566,17 +4971,27 @@ enum ice_status ice_ptp_adj_clock(struct ice_hw *hw, s32 adj, bool lock_sbq)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Write the desired clock adjustment into the GLTSYN_SHADJ register.
- * For an ADJ_TIME command, this set of registers represents the value
- * to add to the clock time. It supports subtraction by interpreting
- * the value as a 2's complement integer.
+ * For an ICE_PTP_ADJ_TIME command, this set of registers represents
+ * the value to add to the clock time. It supports subtraction by
+ * interpreting the value as a 2's complement integer.
*/
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- if (ice_is_e810(hw))
+ switch (hw->phy_cfg) {
+ case ICE_PHY_ETH56G:
+ status = ice_ptp_prep_phy_adj_eth56g(hw, adj, lock_sbq);
+ break;
+ case ICE_PHY_E810:
status = ice_ptp_prep_phy_adj_e810(hw, adj, lock_sbq);
- else
+ break;
+ case ICE_PHY_E822:
status = ice_ptp_prep_phy_adj_e822(hw, adj, lock_sbq);
+ break;
+ default:
+ status = ICE_ERR_NOT_SUPPORTED;
+ }
+
if (status)
return status;
@@ -4624,18 +5039,38 @@ ice_ptp_adj_clock_at_time(struct ice_hw *hw, u64 at_time, s32 adj)
wr32(hw, GLTSYN_SHTIME_H(tmr_idx), time_hi);
/* Prepare PHY port adjustments */
- if (ice_is_e810(hw))
+ switch (hw->phy_cfg) {
+ case ICE_PHY_ETH56G:
+ status = ice_ptp_prep_phy_adj_eth56g(hw, adj, true);
+ break;
+ case ICE_PHY_E810:
status = ice_ptp_prep_phy_adj_e810(hw, adj, true);
- else
+ break;
+ case ICE_PHY_E822:
status = ice_ptp_prep_phy_adj_e822(hw, adj, true);
+ break;
+ default:
+ status = ICE_ERR_NOT_SUPPORTED;
+ }
+
if (status)
return status;
/* Set target time for each PHY port */
- if (ice_is_e810(hw))
+ switch (hw->phy_cfg) {
+ case ICE_PHY_ETH56G:
+ status = ice_ptp_prep_phy_adj_target_eth56g(hw, time_lo);
+ break;
+ case ICE_PHY_E810:
status = ice_ptp_prep_phy_adj_target_e810(hw, time_lo);
- else
+ break;
+ case ICE_PHY_E822:
status = ice_ptp_prep_phy_adj_target_e822(hw, time_lo);
+ break;
+ default:
+ status = ICE_ERR_NOT_SUPPORTED;
+ }
+
if (status)
return status;
@@ -4656,10 +5091,23 @@ ice_ptp_adj_clock_at_time(struct ice_hw *hw, u64 at_time, s32 adj)
enum ice_status
ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- if (ice_is_e810(hw))
- return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- else
- return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
+ enum ice_status status;
+
+ switch (hw->phy_cfg) {
+ case ICE_PHY_ETH56G:
+ status = ice_read_phy_tstamp_eth56g(hw, block, idx, tstamp);
+ break;
+ case ICE_PHY_E810:
+ status = ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
+ break;
+ case ICE_PHY_E822:
+ status = ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
+ break;
+ default:
+ status = ICE_ERR_NOT_SUPPORTED;
+ }
+
+ return status;
}
/**
@@ -4675,10 +5123,23 @@ ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
enum ice_status
ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- if (ice_is_e810(hw))
- return ice_clear_phy_tstamp_e810(hw, block, idx);
- else
- return ice_clear_phy_tstamp_e822(hw, block, idx);
+ enum ice_status status;
+
+ switch (hw->phy_cfg) {
+ case ICE_PHY_ETH56G:
+ status = ice_clear_phy_tstamp_eth56g(hw, block, idx);
+ break;
+ case ICE_PHY_E810:
+ status = ice_clear_phy_tstamp_e810(hw, block, idx);
+ break;
+ case ICE_PHY_E822:
+ status = ice_clear_phy_tstamp_e822(hw, block, idx);
+ break;
+ default:
+ status = ICE_ERR_NOT_SUPPORTED;
+ }
+
+ return status;
}
/**
@@ -4689,6 +5150,7 @@ ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
*/
enum ice_status ice_ptp_init_phc(struct ice_hw *hw)
{
+ enum ice_status status;
u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Enable source clocks */
@@ -4697,8 +5159,19 @@ enum ice_status ice_ptp_init_phc(struct ice_hw *hw)
/* Clear event status indications for auxiliary pins */
(void)rd32(hw, GLTSYN_STAT(src_idx));
- if (ice_is_e810(hw))
- return ice_ptp_init_phc_e810(hw);
- else
- return ice_ptp_init_phc_e822(hw);
+ switch (hw->phy_cfg) {
+ case ICE_PHY_ETH56G:
+ status = ice_ptp_init_phc_eth56g(hw);
+ break;
+ case ICE_PHY_E810:
+ status = ice_ptp_init_phc_e810(hw);
+ break;
+ case ICE_PHY_E822:
+ status = ice_ptp_init_phc_e822(hw);
+ break;
+ default:
+ status = ICE_ERR_NOT_SUPPORTED;
+ }
+
+ return status;
}
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 19/70] net/ice/base: fix PHY type 10G SFI C2C to media type mapping
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (17 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 18/70] net/ice/base: enable calling of ETH56G functions Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 20/70] net/ice/base: refactor DDP code Qi Zhang
` (51 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, stable, Paul Greenwalt
PHY type ICE_PHY_TYPE_LOW_10G_SFI_C2C is incorrectly mapped to media
type Fiber which results in ethtool reporting the wrong Supported
ports.
PHY type ICE_PHY_TYPE_LOW_10G_SFI_C2C should map to media type
Backplane.
Fixes: 453d087ccaff ("net/ice/base: add common functions")
Cc: stable@dpdk.org
Signed-off-by: Paul Greenwalt <paul.greenwalt@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_common.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index cedce2dcf5..57602a31e1 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -561,7 +561,6 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_1000BASE_LX:
case ICE_PHY_TYPE_LOW_10GBASE_SR:
case ICE_PHY_TYPE_LOW_10GBASE_LR:
- case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
case ICE_PHY_TYPE_LOW_25GBASE_SR:
case ICE_PHY_TYPE_LOW_25GBASE_LR:
case ICE_PHY_TYPE_LOW_40GBASE_SR4:
@@ -618,6 +617,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_2500BASE_X:
case ICE_PHY_TYPE_LOW_5GBASE_KR:
case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
case ICE_PHY_TYPE_LOW_25GBASE_KR:
case ICE_PHY_TYPE_LOW_25GBASE_KR1:
case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 20/70] net/ice/base: refactor DDP code
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (18 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 19/70] net/ice/base: fix PHY type 10G SFI C2C to media type mapping Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 6:44 ` Yang, Qiming
2022-08-15 7:31 ` [PATCH v2 21/70] net/ice/base: add E822 generic PCI device ID Qi Zhang
` (50 subsequent siblings)
70 siblings, 1 reply; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang
Cc: dev, Qi Zhang, Sergey Temerkhanov, Wojciech Drewek, Dan Nowlin
Move DDP-related code into ice_ddp.c.
Refactor the status flow for DDP load.
Also add support for DDP signature segments.
Signed-off-by: Sergey Temerkhanov <sergey.temerkhanov@intel.com>
Signed-off-by: Wojciech Drewek <wojciech.drewek@intel.com>
Signed-off-by: Dan Nowlin <dan.nowlin@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_adminq_cmd.h | 32 +
drivers/net/ice/base/ice_bitops.h | 5 +-
drivers/net/ice/base/ice_ddp.c | 2475 +++++++++++++++++++++++++
drivers/net/ice/base/ice_ddp.h | 466 +++++
drivers/net/ice/base/ice_defs.h | 49 +
drivers/net/ice/base/ice_flex_pipe.c | 2175 ++--------------------
drivers/net/ice/base/ice_flex_pipe.h | 57 +-
drivers/net/ice/base/ice_flex_type.h | 286 +--
drivers/net/ice/base/ice_switch.c | 36 +-
drivers/net/ice/base/ice_type.h | 54 +-
drivers/net/ice/base/ice_vlan_mode.c | 1 +
drivers/net/ice/base/meson.build | 1 +
12 files changed, 3233 insertions(+), 2404 deletions(-)
create mode 100644 drivers/net/ice/base/ice_ddp.c
create mode 100644 drivers/net/ice/base/ice_ddp.h
create mode 100644 drivers/net/ice/base/ice_defs.h
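A minimal usage sketch for the relocated segment-lookup helper (the segment
type constant and the wrapper are illustrative; the real callers stay inside
ice_ddp.c):

/* Look up one segment type in a DDP package header. */
static struct ice_generic_seg_hdr *
example_find_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
    struct ice_generic_seg_hdr *seg;

    seg = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_CPK, pkg_hdr);
    if (!seg)
        ice_debug(hw, ICE_DBG_PKG, "segment not found in package\n");

    return seg;
}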
diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index 517af4b6ef..8f7e13096c 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -9,10 +9,19 @@
* descriptor format. It is shared between Firmware and Software.
*/
+#include "ice_osdep.h"
+#include "ice_defs.h"
+#include "ice_bitops.h"
+
#define ICE_MAX_VSI 768
#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9
#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728
+enum ice_aq_res_access_type {
+ ICE_RES_READ = 1,
+ ICE_RES_WRITE
+};
+
struct ice_aqc_generic {
__le32 param0;
__le32 param1;
@@ -1035,6 +1044,24 @@ struct ice_aqc_get_topo {
__le32 addr_low;
};
+/* Get/Set Tx Topology (indirect 0x0418/0x0417) */
+struct ice_aqc_get_set_tx_topo {
+ u8 set_flags;
+#define ICE_AQC_TX_TOPO_FLAGS_CORRER BIT(0)
+#define ICE_AQC_TX_TOPO_FLAGS_SRC_RAM BIT(1)
+#define ICE_AQC_TX_TOPO_FLAGS_SET_PSM BIT(2)
+#define ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW BIT(4)
+#define ICE_AQC_TX_TOPO_FLAGS_ISSUED BIT(5)
+ u8 get_flags;
+#define ICE_AQC_TX_TOPO_GET_NO_UPDATE 0
+#define ICE_AQC_TX_TOPO_GET_PSM 1
+#define ICE_AQC_TX_TOPO_GET_RAM 2
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* Update TSE (indirect 0x0403)
* Get TSE (indirect 0x0404)
* Add TSE (indirect 0x0401)
@@ -3008,6 +3035,7 @@ struct ice_aq_desc {
struct ice_aqc_clear_health_status clear_health_status;
struct ice_aqc_prog_topo_dev_nvm prog_topo_dev_nvm;
struct ice_aqc_read_topo_dev_nvm read_topo_dev_nvm;
+ struct ice_aqc_get_set_tx_topo get_set_tx_topo;
} params;
};
@@ -3164,6 +3192,10 @@ enum ice_adminq_opc {
ice_aqc_opc_query_node_to_root = 0x0413,
ice_aqc_opc_cfg_l2_node_cgd = 0x0414,
ice_aqc_opc_remove_rl_profiles = 0x0415,
+ ice_aqc_opc_set_tx_topo = 0x0417,
+ ice_aqc_opc_get_tx_topo = 0x0418,
+ ice_aqc_opc_cfg_node_attr = 0x0419,
+ ice_aqc_opc_query_node_attr = 0x041A,
/* PHY commands */
ice_aqc_opc_get_phy_caps = 0x0600,
diff --git a/drivers/net/ice/base/ice_bitops.h b/drivers/net/ice/base/ice_bitops.h
index 21ec2014e1..8060c103fa 100644
--- a/drivers/net/ice/base/ice_bitops.h
+++ b/drivers/net/ice/base/ice_bitops.h
@@ -5,6 +5,9 @@
#ifndef _ICE_BITOPS_H_
#define _ICE_BITOPS_H_
+#include "ice_defs.h"
+#include "ice_osdep.h"
+
/* Define the size of the bitmap chunk */
typedef u32 ice_bitmap_t;
@@ -13,7 +16,7 @@ typedef u32 ice_bitmap_t;
/* Determine which chunk a bit belongs in */
#define BIT_CHUNK(nr) ((nr) / BITS_PER_CHUNK)
/* How many chunks are required to store this many bits */
-#define BITS_TO_CHUNKS(sz) DIVIDE_AND_ROUND_UP((sz), BITS_PER_CHUNK)
+#define BITS_TO_CHUNKS(sz) (((sz) + BITS_PER_CHUNK - 1) / BITS_PER_CHUNK)
/* Which bit inside a chunk this bit corresponds to */
#define BIT_IN_CHUNK(nr) ((nr) % BITS_PER_CHUNK)
/* How many bits are valid in the last chunk, assumes nr > 0 */
diff --git a/drivers/net/ice/base/ice_ddp.c b/drivers/net/ice/base/ice_ddp.c
new file mode 100644
index 0000000000..d1cae48047
--- /dev/null
+++ b/drivers/net/ice/base/ice_ddp.c
@@ -0,0 +1,2475 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#include "ice_ddp.h"
+#include "ice_type.h"
+#include "ice_common.h"
+#include "ice_sched.h"
+
+/**
+ * ice_aq_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer to transfer
+ * @buf_size: the size of the package buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Download Package (0x0C40)
+ */
+static enum ice_status
+ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == ICE_ERR_AQ_ERROR) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = LE32_TO_CPU(resp->error_offset);
+ if (error_info)
+ *error_info = LE32_TO_CPU(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_upload_section
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer which will receive the section
+ * @buf_size: the size of the package buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Upload Section (0x0C41)
+ */
+enum ice_status
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+}
+
+/**
+ * ice_aq_update_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package cmd buffer
+ * @buf_size: the size of the package cmd buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update Package (0x0C42)
+ */
+static enum ice_status
+ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
+ bool last_buf, u32 *error_offset, u32 *error_info,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == ICE_ERR_AQ_ERROR) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = LE32_TO_CPU(resp->error_offset);
+ if (error_info)
+ *error_info = LE32_TO_CPU(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_find_seg_in_pkg
+ * @hw: pointer to the hardware structure
+ * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ */
+struct ice_generic_seg_hdr *
+ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+ struct ice_pkg_hdr *pkg_hdr)
+{
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
+ pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
+ pkg_hdr->pkg_format_ver.update,
+ pkg_hdr->pkg_format_ver.draft);
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
+ struct ice_generic_seg_hdr *seg;
+
+ seg = (struct ice_generic_seg_hdr *)
+ ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
+
+ if (LE32_TO_CPU(seg->seg_type) == seg_type)
+ return seg;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_get_pkg_seg_by_idx
+ * @pkg_hdr: pointer to the package header to be searched
+ * @idx: index of segment
+ */
+static struct ice_generic_seg_hdr *
+ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
+{
+ struct ice_generic_seg_hdr *seg = NULL;
+
+ if (idx < LE32_TO_CPU(pkg_hdr->seg_count))
+ seg = (struct ice_generic_seg_hdr *)
+ ((u8 *)pkg_hdr +
+ LE32_TO_CPU(pkg_hdr->seg_offset[idx]));
+
+ return seg;
+}
+
+/**
+ * ice_is_signing_seg_at_idx - determine if segment is a signing segment
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index
+ */
+static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
+{
+ struct ice_generic_seg_hdr *seg;
+ bool retval = false;
+
+ seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx);
+ if (seg)
+ retval = LE32_TO_CPU(seg->seg_type) == SEGMENT_TYPE_SIGNING;
+
+ return retval;
+}
+
+/**
+ * ice_is_signing_seg_type_at_idx
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index
+ * @seg_id: segment id that is expected
+ * @sign_type: signing type
+ *
+ * Determine if a segment is a signing segment of the correct type
+ */
+static bool
+ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
+ u32 seg_id, u32 sign_type)
+{
+ bool result = false;
+
+ if (ice_is_signing_seg_at_idx(pkg_hdr, idx)) {
+ struct ice_sign_seg *seg;
+
+ seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr,
+ idx);
+ if (seg && LE32_TO_CPU(seg->seg_id) == seg_id &&
+ LE32_TO_CPU(seg->sign_type) == sign_type)
+ result = true;
+ }
+
+ return result;
+}
+
+/**
+ * ice_update_pkg_no_lock
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ */
+enum ice_status
+ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_status status = ICE_SUCCESS;
+ u32 i;
+
+ for (i = 0; i < count; i++) {
+ struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
+ bool last = ((i + 1) == count);
+ u32 offset, info;
+
+ status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
+ last, &offset, &info, NULL);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
+ status, offset, info);
+ break;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+enum ice_status
+ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_status status;
+
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ice_update_pkg_no_lock(hw, bufs, count);
+
+ ice_release_change_lock(hw);
+
+ return status;
+}
+
+static enum ice_ddp_state
+ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
+{
+ switch (aq_err) {
+ case ICE_AQ_RC_ENOSEC:
+ return ICE_DDP_PKG_NO_SEC_MANIFEST;
+ case ICE_AQ_RC_EBADSIG:
+ return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
+ case ICE_AQ_RC_ESVN:
+ return ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW;
+ case ICE_AQ_RC_EBADMAN:
+ return ICE_DDP_PKG_MANIFEST_INVALID;
+ case ICE_AQ_RC_EBADBUF:
+ return ICE_DDP_PKG_BUFFER_INVALID;
+ default:
+ return ICE_DDP_PKG_ERR;
+ }
+}
+
+/**
+ * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
+ * @buf: pointer to buffer header
+ */
+static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
+{
+ bool metadata = false;
+
+ if (LE32_TO_CPU(buf->section_entry[0].type) & ICE_METADATA_BUF)
+ metadata = true;
+
+ return metadata;
+}
+
+/**
+ * ice_is_last_download_buffer
+ * @buf: pointer to current buffer header
+ * @idx: index of the buffer in the current sequence
+ * @count: the buffer count in the current sequence
+ *
+ * Note: this routine should only be called if the buffer is not the last buffer
+ */
+static bool
+ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count)
+{
+ bool last = ((idx + 1) == count);
+
+ /* A set metadata flag in the next buffer will signal that the current
+ * buffer will be the last buffer downloaded
+ */
+ if (!last) {
+ struct ice_buf *next_buf = ((struct ice_buf *)buf) + 1;
+
+ last = ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf);
+ }
+
+ return last;
+}
+
+/**
+ * ice_dwnld_cfg_bufs_no_lock
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @start: buffer index of first buffer to download
+ * @count: the number of buffers to download
+ * @indicate_last: if true, then set last buffer flag on last buffer download
+ *
+ * Downloads package configuration buffers to the firmware. Metadata buffers
+ * are skipped, and the first metadata buffer found indicates that the rest
+ * of the buffers are all metadata buffers.
+ */
+static enum ice_ddp_state
+ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
+ u32 count, bool indicate_last)
+{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
+ struct ice_buf_hdr *bh;
+ enum ice_aq_err err;
+ u32 offset, info, i;
+
+ if (!bufs || !count)
+ return ICE_DDP_PKG_ERR;
+
+ /* If the first buffer's first section has its metadata bit set
+ * then there are no buffers to be downloaded, and the operation is
+ * considered a success.
+ */
+ bh = (struct ice_buf_hdr *)(bufs + start);
+ if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ return ICE_DDP_PKG_SUCCESS;
+
+ for (i = 0; i < count; i++) {
+ enum ice_status status;
+ bool last = false;
+
+ bh = (struct ice_buf_hdr *)(bufs + start + i);
+
+ if (indicate_last)
+ last = ice_is_last_download_buffer(bh, i, count);
+
+ status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
+ &offset, &info, NULL);
+
+ /* Save AQ status from download package */
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
+ status, offset, info);
+ err = hw->adminq.sq_last_status;
+ state = ice_map_aq_err_to_ddp_state(err);
+ break;
+ }
+
+ if (last)
+ break;
+ }
+
+ return state;
+}
+
+/**
+ * ice_aq_get_pkg_info_list
+ * @hw: pointer to the hardware structure
+ * @pkg_info: the buffer which will receive the information list
+ * @buf_size: the size of the pkg_info information buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get Package Info List (0x0C43)
+ */
+static enum ice_status
+ice_aq_get_pkg_info_list(struct ice_hw *hw,
+ struct ice_aqc_get_pkg_info_resp *pkg_info,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
+}
+
+/**
+ * ice_has_signing_seg - determine if package has a signing segment
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to the driver's package hdr
+ */
+static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+{
+ struct ice_generic_seg_hdr *seg_hdr;
+
+ seg_hdr = (struct ice_generic_seg_hdr *)
+ ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr);
+
+ return seg_hdr ? true : false;
+}
+
+/**
+ * ice_get_pkg_segment_id - get correct package segment id, based on device
+ * @mac_type: MAC type of the device
+ */
+static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
+{
+ u32 seg_id;
+
+ switch (mac_type) {
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K:
+ default:
+ seg_id = SEGMENT_TYPE_ICE_E810;
+ break;
+ }
+
+ return seg_id;
+}
+
+/**
+ * ice_get_pkg_sign_type - get package segment sign type, based on device
+ * @mac_type: MAC type of the device
+ */
+static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
+{
+ u32 sign_type;
+
+ switch (mac_type) {
+ case ICE_MAC_GENERIC_3K:
+ sign_type = SEGMENT_SIGN_TYPE_RSA3K;
+ break;
+ case ICE_MAC_GENERIC:
+ default:
+ sign_type = SEGMENT_SIGN_TYPE_RSA2K;
+ break;
+ }
+
+ return sign_type;
+}
+
+/**
+ * ice_get_signing_req - get correct package requirements, based on device
+ * @hw: pointer to the hardware structure
+ */
+static void ice_get_signing_req(struct ice_hw *hw)
+{
+ hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type);
+ hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type);
+}
+
+/**
+ * ice_download_pkg_sig_seg - download a signature segment
+ * @hw: pointer to the hardware structure
+ * @seg: pointer to signature segment
+ */
+static enum ice_ddp_state
+ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
+{
+ enum ice_ddp_state state;
+
+ state = ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0,
+ LE32_TO_CPU(seg->buf_tbl.buf_count),
+ false);
+
+ return state;
+}
+
+/**
+ * ice_download_pkg_config_seg - download a config segment
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index
+ * @start: starting buffer
+ * @count: buffer count
+ *
+ * Note: idx must reference an ICE segment
+ */
+static enum ice_ddp_state
+ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
+ u32 idx, u32 start, u32 count)
+{
+ struct ice_buf_table *bufs;
+ enum ice_ddp_state state;
+ struct ice_seg *seg;
+ u32 buf_count;
+
+ seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
+ if (!seg)
+ return ICE_DDP_PKG_ERR;
+
+ bufs = ice_find_buf_table(seg);
+ buf_count = LE32_TO_CPU(bufs->buf_count);
+
+ if (start >= buf_count || start + count > buf_count)
+ return ICE_DDP_PKG_ERR;
+
+ state = ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count,
+ true);
+
+ return state;
+}
+
+/**
+ * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index (must be a signature segment)
+ *
+ * Note: idx must reference a signature segment
+ */
+static enum ice_ddp_state
+ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
+ u32 idx)
+{
+ enum ice_ddp_state state;
+ struct ice_sign_seg *seg;
+ u32 conf_idx;
+ u32 start;
+ u32 count;
+
+ seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
+ if (!seg) {
+ state = ICE_DDP_PKG_ERR;
+ goto exit;
+ }
+
+ conf_idx = LE32_TO_CPU(seg->signed_seg_idx);
+ start = LE32_TO_CPU(seg->signed_buf_start);
+ count = LE32_TO_CPU(seg->signed_buf_count);
+
+ state = ice_download_pkg_sig_seg(hw, seg);
+ if (state)
+ goto exit;
+
+ state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
+ count);
+
+exit:
+ return state;
+}
+
+/**
+ * ice_match_signing_seg - determine if a matching signing segment exists
+ * @pkg_hdr: pointer to package header
+ * @seg_id: segment id that is expected
+ * @sign_type: signing type
+ */
+static bool
+ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type)
+{
+ bool match = false;
+ u32 i;
+
+ for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
+ if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id,
+ sign_type)) {
+ match = true;
+ break;
+ }
+ }
+
+ return match;
+}
+
+/**
+ * ice_post_dwnld_pkg_actions - perform post download package actions
+ * @hw: pointer to the hardware structure
+ */
+static enum ice_ddp_state
+ice_post_dwnld_pkg_actions(struct ice_hw *hw)
+{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
+ enum ice_status status;
+
+ status = ice_set_vlan_mode(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
+ status);
+ state = ICE_DDP_PKG_ERR;
+ }
+
+ return state;
+}
+
+/**
+ * ice_download_pkg_with_sig_seg - download package using signature segments
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to package header
+ */
+static enum ice_ddp_state
+ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+{
+ enum ice_aq_err aq_err = hw->adminq.sq_last_status;
+ enum ice_ddp_state state = ICE_DDP_PKG_ERR;
+ enum ice_status status;
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id);
+ ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type);
+
+ status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ if (status) {
+ if (status == ICE_ERR_AQ_NO_WORK)
+ state = ICE_DDP_PKG_ALREADY_LOADED;
+ else
+ state = ice_map_aq_err_to_ddp_state(aq_err);
+ return state;
+ }
+
+ for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
+ if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id,
+ hw->pkg_sign_type))
+ continue;
+
+ state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i);
+ if (state)
+ break;
+ }
+
+ if (!state)
+ state = ice_post_dwnld_pkg_actions(hw);
+
+ ice_release_global_cfg_lock(hw);
+
+ return state;
+}
+
+/**
+ * ice_dwnld_cfg_bufs
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains global config lock and downloads the package configuration buffers
+ * to the firmware.
+ */
+static enum ice_ddp_state
+ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
+ enum ice_status status;
+ struct ice_buf_hdr *bh;
+
+ if (!bufs || !count)
+ return ICE_DDP_PKG_ERR;
+
+ /* If the first buffer's first section has its metadata bit set
+ * then there are no buffers to be downloaded, and the operation is
+ * considered a success.
+ */
+ bh = (struct ice_buf_hdr *)bufs;
+ if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ return ICE_DDP_PKG_SUCCESS;
+
+ status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ if (status) {
+ if (status == ICE_ERR_AQ_NO_WORK)
+ return ICE_DDP_PKG_ALREADY_LOADED;
+ return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
+ }
+
+ state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
+ if (!state)
+ state = ice_post_dwnld_pkg_actions(hw);
+
+ ice_release_global_cfg_lock(hw);
+
+ return state;
+}
+
+/**
+ * ice_download_pkg_without_sig_seg
+ * @hw: pointer to the hardware structure
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package without a signature segment.
+ */
+static enum ice_ddp_state
+ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_buf_table *ice_buf_tbl;
+ enum ice_ddp_state state;
+
+ ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
+ ice_seg->hdr.seg_format_ver.major,
+ ice_seg->hdr.seg_format_ver.minor,
+ ice_seg->hdr.seg_format_ver.update,
+ ice_seg->hdr.seg_format_ver.draft);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
+ LE32_TO_CPU(ice_seg->hdr.seg_type),
+ LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
+
+ ice_buf_tbl = ice_find_buf_table(ice_seg);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
+ LE32_TO_CPU(ice_buf_tbl->buf_count));
+
+ state = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+ LE32_TO_CPU(ice_buf_tbl->buf_count));
+
+ return state;
+}
+
+/**
+ * ice_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to package header
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package.
+ */
+static enum ice_ddp_state
+ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
+ struct ice_seg *ice_seg)
+{
+ enum ice_ddp_state state;
+
+ if (hw->pkg_has_signing_seg)
+ state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
+ else
+ state = ice_download_pkg_without_sig_seg(hw, ice_seg);
+
+ ice_post_pkg_dwnld_vlan_mode_cfg(hw);
+
+ return state;
+}
+
+/**
+ * ice_init_pkg_info
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to the driver's package hdr
+ *
+ * Saves off the package details into the HW structure.
+ */
+static enum ice_ddp_state
+ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+{
+ struct ice_generic_seg_hdr *seg_hdr;
+
+ if (!pkg_hdr)
+ return ICE_DDP_PKG_ERR;
+
+ hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr);
+ ice_get_signing_req(hw);
+
+ ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
+ hw->pkg_seg_id);
+
+ seg_hdr = (struct ice_generic_seg_hdr *)
+ ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
+ if (seg_hdr) {
+ struct ice_meta_sect *meta;
+ struct ice_pkg_enum state;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ /* Get package information from the Metadata Section */
+ meta = (struct ice_meta_sect *)
+ ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
+ ICE_SID_METADATA);
+ if (!meta) {
+ ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ hw->pkg_ver = meta->ver;
+ ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
+ ICE_NONDMA_TO_NONDMA);
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
+ meta->ver.major, meta->ver.minor, meta->ver.update,
+ meta->ver.draft, meta->name);
+
+ hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
+ ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
+ sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);
+
+ ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
+ seg_hdr->seg_format_ver.major,
+ seg_hdr->seg_format_ver.minor,
+ seg_hdr->seg_format_ver.update,
+ seg_hdr->seg_format_ver.draft,
+ seg_hdr->seg_id);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ return ICE_DDP_PKG_SUCCESS;
+}
+
+/**
+ * ice_get_pkg_info
+ * @hw: pointer to the hardware structure
+ *
+ * Store details of the package currently loaded in HW into the HW structure.
+ */
+enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
+{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
+ struct ice_aqc_get_pkg_info_resp *pkg_info;
+ u16 size;
+ u32 i;
+
+ size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
+ pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
+ if (!pkg_info)
+ return ICE_DDP_PKG_ERR;
+
+ if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
+ state = ICE_DDP_PKG_ERR;
+ goto init_pkg_free_alloc;
+ }
+
+ for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
+#define ICE_PKG_FLAG_COUNT 4
+ char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
+ u8 place = 0;
+
+ if (pkg_info->pkg_info[i].is_active) {
+ flags[place++] = 'A';
+ hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
+ hw->active_track_id =
+ LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
+ ice_memcpy(hw->active_pkg_name,
+ pkg_info->pkg_info[i].name,
+ sizeof(pkg_info->pkg_info[i].name),
+ ICE_NONDMA_TO_NONDMA);
+ hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
+ }
+ if (pkg_info->pkg_info[i].is_active_at_boot)
+ flags[place++] = 'B';
+ if (pkg_info->pkg_info[i].is_modified)
+ flags[place++] = 'M';
+ if (pkg_info->pkg_info[i].is_in_nvm)
+ flags[place++] = 'N';
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
+ i, pkg_info->pkg_info[i].ver.major,
+ pkg_info->pkg_info[i].ver.minor,
+ pkg_info->pkg_info[i].ver.update,
+ pkg_info->pkg_info[i].ver.draft,
+ pkg_info->pkg_info[i].name, flags);
+ }
+
+init_pkg_free_alloc:
+ ice_free(hw, pkg_info);
+
+ return state;
+}
+
+/**
+ * ice_label_enum_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the label entry to be returned
+ * @offset: pointer to receive absolute offset, always zero for label sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual label entries.
+ */
+static void *
+ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_label_section *labels;
+
+ if (!section)
+ return NULL;
+
+ if (index > ICE_MAX_LABELS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ labels = (struct ice_label_section *)section;
+ if (index >= LE16_TO_CPU(labels->count))
+ return NULL;
+
+ return labels->label + index;
+}
+
+/**
+ * ice_enum_labels
+ * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
+ * @type: the section type that will contain the label (0 on subsequent calls)
+ * @state: ice_pkg_enum structure that will hold the state of the enumeration
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Enumerates a list of labels in the package. The caller will call
+ * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
+ * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
+ * the end of the list has been reached.
+ */
+static char *
+ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
+ u16 *value)
+{
+ struct ice_label *label;
+
+ /* Check for valid label section on first call */
+ if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
+ return NULL;
+
+ label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
+ NULL,
+ ice_label_enum_handler);
+ if (!label)
+ return NULL;
+
+ *value = LE16_TO_CPU(label->value);
+ return label->name;
+}
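+
+/* A minimal usage sketch of the call pattern described above (illustrative
+ * only; it mirrors how ice_init_pkg_hints() below drives the enumeration and
+ * assumes the caller already holds a valid ice segment pointer):
+ *
+ *	struct ice_pkg_enum state;
+ *	char *name;
+ *	u16 val;
+ *
+ *	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+ *	name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, &val);
+ *	while (name) {
+ *		(inspect "name" and "val" here)
+ *		name = ice_enum_labels(NULL, 0, &state, &val);
+ *	}
+ */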
+
+/**
+ * ice_verify_pkg - verify package
+ * @pkg: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * Verifies various attributes of the package file, including length, format
+ * version, and the requirement of at least one segment.
+ */
+enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+{
+ u32 seg_count;
+ u32 i;
+
+ if (len < ice_struct_size(pkg, seg_offset, 1))
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+ pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+ pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
+ pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ /* pkg must have at least one segment */
+ seg_count = LE32_TO_CPU(pkg->seg_count);
+ if (seg_count < 1)
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ /* make sure segment array fits in package length */
+ if (len < ice_struct_size(pkg, seg_offset, seg_count))
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ /* all segments must fit within length */
+ for (i = 0; i < seg_count; i++) {
+ u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
+ struct ice_generic_seg_hdr *seg;
+
+ /* segment header must fit */
+ if (len < off + sizeof(*seg))
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
+
+ /* segment body must fit */
+ if (len < off + LE32_TO_CPU(seg->seg_size))
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ return ICE_DDP_PKG_SUCCESS;
+}
+
+/**
+ * ice_free_seg - free package segment pointer
+ * @hw: pointer to the hardware structure
+ *
+ * Frees the package segment pointer in the proper manner, depending on
+ * whether the segment was allocated or just the passed-in pointer was stored.
+ */
+void ice_free_seg(struct ice_hw *hw)
+{
+ if (hw->pkg_copy) {
+ ice_free(hw, hw->pkg_copy);
+ hw->pkg_copy = NULL;
+ hw->pkg_size = 0;
+ }
+ hw->seg = NULL;
+}
+
+/**
+ * ice_chk_pkg_version - check package version for compatibility with driver
+ * @pkg_ver: pointer to a version structure to check
+ *
+ * Check to make sure that the package about to be downloaded is compatible with
+ * the driver. To be compatible, the major and minor components of the package
+ * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
+ * definitions.
+ */
+static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
+{
+ if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
+ else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
+
+ return ICE_DDP_PKG_SUCCESS;
+}
+
+/**
+ * ice_chk_pkg_compat
+ * @hw: pointer to the hardware structure
+ * @ospkg: pointer to the package hdr
+ * @seg: pointer to the package segment hdr
+ *
+ * This function checks the package version compatibility with the driver and NVM
+ */
+static enum ice_ddp_state
+ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
+ struct ice_seg **seg)
+{
+ struct ice_aqc_get_pkg_info_resp *pkg;
+ enum ice_ddp_state state;
+ u16 size;
+ u32 i;
+
+ /* Check package version compatibility */
+ state = ice_chk_pkg_version(&hw->pkg_ver);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
+ return state;
+ }
+
+ /* find ICE segment in given package */
+ *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
+ ospkg);
+ if (!*seg) {
+ ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ /* Check if FW is compatible with the OS package */
+ size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
+ pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
+ if (!pkg)
+ return ICE_DDP_PKG_ERR;
+
+ if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
+ state = ICE_DDP_PKG_ERR;
+ goto fw_ddp_compat_free_alloc;
+ }
+
+ for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
+ /* loop until we find the NVM package */
+ if (!pkg->pkg_info[i].is_in_nvm)
+ continue;
+ if ((*seg)->hdr.seg_format_ver.major !=
+ pkg->pkg_info[i].ver.major ||
+ (*seg)->hdr.seg_format_ver.minor >
+ pkg->pkg_info[i].ver.minor) {
+ state = ICE_DDP_PKG_FW_MISMATCH;
+ ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
+ }
+ /* done processing NVM package so break */
+ break;
+ }
+fw_ddp_compat_free_alloc:
+ ice_free(hw, pkg);
+ return state;
+}
+
+/**
+ * ice_sw_fv_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * This function treats the given section as being of type ice_sw_fv_section
+ * and enumerates the offset field. "offset" is an index into the field vector
+ * table.
+ */
+static void *
+ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_sw_fv_section *fv_section =
+ (struct ice_sw_fv_section *)section;
+
+ if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+ return NULL;
+ if (index >= LE16_TO_CPU(fv_section->count))
+ return NULL;
+ if (offset)
+ /* "index" passed in to this function is relative to a given
+ * 4k block. To get to the true index into the field vector
+ * table need to add the relative index to the base_offset
+ * field of this section
+ */
+ *offset = LE16_TO_CPU(fv_section->base_offset) + index;
+ return fv_section->fv + index;
+}
+
+/**
+ * ice_get_prof_index_max - get the max profile index of the used profiles
+ * @hw: pointer to the HW struct
+ *
+ * Calling this function will get the max profile index of the used profiles
+ * and store the index number in struct ice_switch_info *switch_info
+ * in hw for later use.
+ */
+static int ice_get_prof_index_max(struct ice_hw *hw)
+{
+ u16 prof_index = 0, j, max_prof_index = 0;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ bool flag = false;
+ struct ice_fv *fv;
+ u32 offset;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+
+ do {
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* In a profile that is not used, the prot_id is set to 0xff
+ * and the off is set to 0x1ff for all the field vectors.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
+ fv->ew[j].off != ICE_FV_OFFSET_INVAL)
+ flag = true;
+ if (flag && prof_index > max_prof_index)
+ max_prof_index = prof_index;
+
+ prof_index++;
+ flag = false;
+ } while (fv);
+
+ hw->switch_info->max_used_prof_index = max_prof_index;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_get_ddp_pkg_state - get DDP pkg state after download
+ * @hw: pointer to the HW struct
+ * @already_loaded: indicates if pkg was already loaded onto the device
+ */
+static enum ice_ddp_state
+ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
+{
+ if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
+ hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
+ hw->pkg_ver.update == hw->active_pkg_ver.update &&
+ hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
+ !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
+ if (already_loaded)
+ return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
+ else
+ return ICE_DDP_PKG_SUCCESS;
+ } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
+ hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
+ } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
+ hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
+ } else {
+ return ICE_DDP_PKG_ERR;
+ }
+}
+
+/**
+ * ice_init_pkg_regs - initialize additional package registers
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_pkg_regs(struct ice_hw *hw)
+{
+#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
+#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
+#define ICE_SW_BLK_IDX 0
+ if (hw->dcf_enabled)
+ return;
+
+ /* setup Switch block input mask, which is 48-bits in two parts */
+ wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
+ wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
+}
+
+/**
+ * ice_hw_ptype_ena - check if the PTYPE is enabled or not
+ * @hw: pointer to the HW structure
+ * @ptype: the hardware PTYPE
+ */
+bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
+{
+ return ptype < ICE_FLOW_PTYPE_MAX &&
+ ice_is_bit_set(hw->hw_ptype, ptype);
+}
+
+/**
+ * ice_marker_ptype_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the Marker PType TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual Marker PType TCAM entries.
+ */
+static void *
+ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_marker_ptype_tcam_section *marker_ptype;
+
+ if (!section)
+ return NULL;
+
+ if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
+ return NULL;
+
+ if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ marker_ptype = (struct ice_marker_ptype_tcam_section *)section;
+ if (index >= LE16_TO_CPU(marker_ptype->count))
+ return NULL;
+
+ return marker_ptype->tcam + index;
+}
+
+/**
+ * ice_fill_hw_ptype - fill the enabled PTYPE bit information
+ * @hw: pointer to the HW structure
+ */
+static void
+ice_fill_hw_ptype(struct ice_hw *hw)
+{
+ struct ice_marker_ptype_tcam_entry *tcam;
+ struct ice_seg *seg = hw->seg;
+ struct ice_pkg_enum state;
+
+ ice_zero_bitmap(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
+ if (!seg)
+ return;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ do {
+ tcam = (struct ice_marker_ptype_tcam_entry *)
+ ice_pkg_enum_entry(seg, &state,
+ ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
+ ice_marker_ptype_tcam_handler);
+ if (tcam &&
+ LE16_TO_CPU(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
+ LE16_TO_CPU(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
+ ice_set_bit(LE16_TO_CPU(tcam->ptype), hw->hw_ptype);
+
+ seg = NULL;
+ } while (tcam);
+}
+
+/**
+ * ice_init_pkg - initialize/download package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function initializes a package. The package contains HW tables
+ * required to do packet processing. First, the function extracts package
+ * information such as version. Then it finds the ice configuration segment
+ * within the package; this function then saves a copy of the segment pointer
+ * within the supplied package buffer. Next, the function will cache any hints
+ * from the package, followed by downloading the package itself. Note that if
+ * a previous PF driver has already downloaded the package successfully, then
+ * the current driver will not have to download the package again.
+ *
+ * The local package contents will be used to query default behavior and to
+ * update specific sections of the HW's version of the package (e.g. to update
+ * the parse graph to understand new protocols).
+ *
+ * This function stores a pointer to the package buffer memory, and it is
+ * expected that the supplied buffer will not be freed immediately. If the
+ * package buffer needs to be freed, such as when read from a file, use
+ * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
+ * case.
+ */
+enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+{
+ bool already_loaded = false;
+ enum ice_ddp_state state;
+ struct ice_pkg_hdr *pkg;
+ struct ice_seg *seg;
+
+ if (!buf || !len)
+ return ICE_DDP_PKG_ERR;
+
+ pkg = (struct ice_pkg_hdr *)buf;
+ state = ice_verify_pkg(pkg, len);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
+ state);
+ return state;
+ }
+
+ /* initialize package info */
+ state = ice_init_pkg_info(hw, pkg);
+ if (state)
+ return state;
+
+ /* For packages with signing segments, must be a matching segment */
+ if (hw->pkg_has_signing_seg)
+ if (!ice_match_signing_seg(pkg, hw->pkg_seg_id,
+ hw->pkg_sign_type))
+ return ICE_DDP_PKG_ERR;
+
+ /* before downloading the package, check package version for
+ * compatibility with driver
+ */
+ state = ice_chk_pkg_compat(hw, pkg, &seg);
+ if (state)
+ return state;
+
+ /* initialize package hints and then download package */
+ ice_init_pkg_hints(hw, seg);
+ state = ice_download_pkg(hw, pkg, seg);
+
+ if (state == ICE_DDP_PKG_ALREADY_LOADED) {
+ ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
+ already_loaded = true;
+ }
+
+ /* Get information on the package currently loaded in HW, then make sure
+ * the driver is compatible with this version.
+ */
+ if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
+ state = ice_get_pkg_info(hw);
+ if (!state)
+ state = ice_get_ddp_pkg_state(hw, already_loaded);
+ }
+
+ if (ice_is_init_pkg_successful(state)) {
+ hw->seg = seg;
+ /* on successful package download update other required
+ * registers to support the package and fill HW tables
+ * with package content.
+ */
+ ice_init_pkg_regs(hw);
+ ice_fill_blk_tbls(hw);
+ ice_fill_hw_ptype(hw);
+ ice_get_prof_index_max(hw);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
+ state);
+ }
+
+ return state;
+}
+
+/**
+ * ice_copy_and_init_pkg - initialize/download a copy of the package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function copies the package buffer, and then calls ice_init_pkg() to
+ * initialize the copied package contents.
+ *
+ * The copying is necessary if the package buffer supplied is constant, or if
+ * the memory may disappear shortly after calling this function.
+ *
+ * If the package buffer resides in the data segment and can be modified, the
+ * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
+ *
+ * However, if the package buffer needs to be copied first, such as when being
+ * read from a file, the caller should use ice_copy_and_init_pkg().
+ *
+ * This function will first copy the package buffer, before calling
+ * ice_init_pkg(). The caller is free to immediately destroy the original
+ * package buffer, as the new copy will be managed by this function and
+ * related routines.
+ */
+enum ice_ddp_state
+ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
+{
+ enum ice_ddp_state state;
+ u8 *buf_copy;
+
+ if (!buf || !len)
+ return ICE_DDP_PKG_ERR;
+
+ buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
+
+ state = ice_init_pkg(hw, buf_copy, len);
+ if (!ice_is_init_pkg_successful(state)) {
+ /* Free the copy, since we failed to initialize the package */
+ ice_free(hw, buf_copy);
+ } else {
+ /* Track the copied pkg so we can free it later */
+ hw->pkg_copy = buf_copy;
+ hw->pkg_size = len;
+ }
+
+ return state;
+}
+
+/**
+ * ice_is_init_pkg_successful - check if DDP init was successful
+ * @state: state of the DDP pkg after download
+ */
+bool ice_is_init_pkg_successful(enum ice_ddp_state state)
+{
+ switch (state) {
+ case ICE_DDP_PKG_SUCCESS:
+ case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
+ case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
+ return true;
+ default:
+ return false;
+ }
+}
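+
+/* A hedged caller-side sketch (not part of this patch): a PF driver that has
+ * read the DDP file into "pkg_data"/"pkg_len" (names assumed here) would
+ * typically initialize the package via the copying variant and test the
+ * result with ice_is_init_pkg_successful():
+ *
+ *	enum ice_ddp_state state;
+ *
+ *	state = ice_copy_and_init_pkg(hw, pkg_data, pkg_len);
+ *	if (!ice_is_init_pkg_successful(state))
+ *		ice_debug(hw, ICE_DBG_INIT, "DDP init failed: %d\n", state);
+ */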
+
+/**
+ * ice_pkg_buf_alloc
+ * @hw: pointer to the HW structure
+ *
+ * Allocates a package buffer and returns a pointer to the buffer header.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
+{
+ struct ice_buf_build *bld;
+ struct ice_buf_hdr *buf;
+
+ bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
+ if (!bld)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)bld;
+ buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
+ section_entry));
+ return bld;
+}
+
+/**
+ * ice_get_sw_prof_type - determine switch profile type
+ * @hw: pointer to the HW structure
+ * @fv: pointer to the switch field vector
+ */
+static enum ice_prof_type
+ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
+{
+ bool valid_prof = false;
+ u16 i;
+
+ for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
+ if (fv->ew[i].off != ICE_NAN_OFFSET)
+ valid_prof = true;
+
+ /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
+ fv->ew[i].off == ICE_VNI_OFFSET)
+ return ICE_PROF_TUN_UDP;
+
+ /* GRE tunnel will have GRE protocol */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
+ return ICE_PROF_TUN_GRE;
+
+ /* PPPOE tunnel will have PPPOE protocol */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_PPPOE)
+ return ICE_PROF_TUN_PPPOE;
+ }
+
+ return valid_prof ? ICE_PROF_NON_TUN : ICE_PROF_INVALID;
+}
+
+/**
+ * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
+ * @hw: pointer to hardware structure
+ * @req_profs: type of profiles requested
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+void
+ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
+ ice_bitmap_t *bm)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+ ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
+ ice_seg = hw->seg;
+ do {
+ enum ice_prof_type prof_type;
+ u32 offset;
+
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ ice_seg = NULL;
+
+ if (fv) {
+ /* Determine field vector type */
+ prof_type = ice_get_sw_prof_type(hw, fv);
+
+ if (req_profs & prof_type)
+ ice_set_bit((u16)offset, bm);
+ }
+ } while (fv);
+}
+
+/**
+ * ice_get_sw_fv_list
+ * @hw: pointer to the HW structure
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @bm: bitmap of field vectors to consider
+ * @fv_list: Head of a list
+ *
+ * Finds all the field vector entries from the switch block that contain
+ * a given protocol ID and offset, and returns a list of structures of type
+ * "ice_sw_fv_list_entry". Every structure in the list has a field vector
+ * definition and profile ID information.
+ * NOTE: The caller of the function is responsible for freeing the memory
+ * allocated for every list entry.
+ */
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
+ ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
+{
+ struct ice_sw_fv_list_entry *fvl;
+ struct ice_sw_fv_list_entry *tmp;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+ u32 offset;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!lkups->n_val_words || !hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+ do {
+ u16 i;
+
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* If field vector is not in the bitmap list, then skip this
+ * profile.
+ */
+ if (!ice_is_bit_set(bm, (u16)offset))
+ continue;
+
+ for (i = 0; i < lkups->n_val_words; i++) {
+ int j;
+
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id ==
+ lkups->fv_words[i].prot_id &&
+ fv->ew[j].off == lkups->fv_words[i].off)
+ break;
+ if (j >= hw->blk[ICE_BLK_SW].es.fvw)
+ break;
+ if (i + 1 == lkups->n_val_words) {
+ fvl = (struct ice_sw_fv_list_entry *)
+ ice_malloc(hw, sizeof(*fvl));
+ if (!fvl)
+ goto err;
+ fvl->fv_ptr = fv;
+ fvl->profile_id = offset;
+ LIST_ADD(&fvl->list_entry, fv_list);
+ break;
+ }
+ }
+ } while (fv);
+ if (LIST_EMPTY(fv_list))
+ return ICE_ERR_CFG;
+ return ICE_SUCCESS;
+
+err:
+ LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
+ list_entry) {
+ LIST_DEL(&fvl->list_entry);
+ ice_free(hw, fvl);
+ }
+
+ return ICE_ERR_NO_MEMORY;
+}
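+
+/* Caller-side cleanup sketch (illustrative only): as noted above, every entry
+ * returned on "fv_list" must be freed by the caller, e.g.:
+ *
+ *	struct ice_sw_fv_list_entry *fvl, *tmp;
+ *
+ *	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
+ *				 list_entry) {
+ *		LIST_DEL(&fvl->list_entry);
+ *		ice_free(hw, fvl);
+ *	}
+ */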
+
+/**
+ * ice_init_prof_result_bm - Initialize the profile result index bitmap
+ * @hw: pointer to hardware structure
+ */
+void ice_init_prof_result_bm(struct ice_hw *hw)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!hw->seg)
+ return;
+
+ ice_seg = hw->seg;
+ do {
+ u32 off;
+ u16 i;
+
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &off, ice_sw_fv_handler);
+ ice_seg = NULL;
+ if (!fv)
+ break;
+
+ ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
+ ICE_MAX_FV_WORDS);
+
+ /* Determine empty field vector indices, these can be
+ * used for recipe results. Skip index 0, since it is
+ * always used for Switch ID.
+ */
+ for (i = 1; i < ICE_MAX_FV_WORDS; i++)
+ if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
+ fv->ew[i].off == ICE_FV_OFFSET_INVAL)
+ ice_set_bit(i,
+ hw->switch_info->prof_res_bm[off]);
+ } while (fv);
+}
+
+/**
+ * ice_pkg_buf_free
+ * @hw: pointer to the HW structure
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Frees a package buffer
+ */
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+{
+ ice_free(hw, bld);
+}
+
+/**
+ * ice_pkg_buf_reserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to reserve
+ *
+ * Reserves one or more section table entries in a package buffer. This routine
+ * can be called multiple times as long as all calls are made before calling
+ * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
+ * called, the number of sections that can be allocated can no longer be
+ * increased; not using all reserved sections is fine, but this will
+ * result in some wasted space in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+enum ice_status
+ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
+{
+ struct ice_buf_hdr *buf;
+ u16 section_count;
+ u16 data_end;
+
+ if (!bld)
+ return ICE_ERR_PARAM;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* already an active section, can't increase table size */
+ section_count = LE16_TO_CPU(buf->section_count);
+ if (section_count > 0)
+ return ICE_ERR_CFG;
+
+ if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
+ return ICE_ERR_CFG;
+ bld->reserved_section_table_entries += count;
+
+ data_end = LE16_TO_CPU(buf->data_end) +
+ FLEX_ARRAY_SIZE(buf, section_entry, count);
+ buf->data_end = CPU_TO_LE16(data_end);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_pkg_buf_alloc_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ *
+ * Reserves memory in the buffer for a section's content and updates the
+ * buffer's status accordingly. This routine returns a pointer to the first
+ * byte of the section start within the buffer, which is used to fill in the
+ * section contents.
+ * Note: all package contents must be in Little Endian form.
+ */
+void *
+ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
+{
+ struct ice_buf_hdr *buf;
+ u16 sect_count;
+ u16 data_end;
+
+ if (!bld || !type || !size)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* check for enough space left in buffer */
+ data_end = LE16_TO_CPU(buf->data_end);
+
+ /* section start must align on 4 byte boundary */
+ data_end = ICE_ALIGN(data_end, 4);
+
+ if ((data_end + size) > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ /* check for more available section table entries */
+ sect_count = LE16_TO_CPU(buf->section_count);
+ if (sect_count < bld->reserved_section_table_entries) {
+ void *section_ptr = ((u8 *)buf) + data_end;
+
+ buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
+ buf->section_entry[sect_count].size = CPU_TO_LE16(size);
+ buf->section_entry[sect_count].type = CPU_TO_LE32(type);
+
+ data_end += size;
+ buf->data_end = CPU_TO_LE16(data_end);
+
+ buf->section_count = CPU_TO_LE16(sect_count + 1);
+ return section_ptr;
+ }
+
+ /* no free section table entries */
+ return NULL;
+}
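+
+/* Buffer build sketch (illustrative only; "sect_type" and "sect_size" are
+ * placeholders supplied by the caller):
+ *
+ *	struct ice_buf_build *bld;
+ *	enum ice_status status;
+ *	void *sect = NULL;
+ *
+ *	bld = ice_pkg_buf_alloc(hw);
+ *	if (!bld)
+ *		return ICE_ERR_NO_MEMORY;
+ *	if (!ice_pkg_buf_reserve_section(bld, 1))
+ *		sect = ice_pkg_buf_alloc_section(bld, sect_type, sect_size);
+ *	if (sect) {
+ *		(fill "sect" with Little Endian section content)
+ *		status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ *	}
+ *	ice_pkg_buf_free(hw, bld);
+ */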
+
+/**
+ * ice_pkg_buf_alloc_single_section
+ * @hw: pointer to the HW structure
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ * @section: returns pointer to the section
+ *
+ * Allocates a package buffer with a single section.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+ void **section)
+{
+ struct ice_buf_build *buf;
+
+ if (!section)
+ return NULL;
+
+ buf = ice_pkg_buf_alloc(hw);
+ if (!buf)
+ return NULL;
+
+ if (ice_pkg_buf_reserve_section(buf, 1))
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ *section = ice_pkg_buf_alloc_section(buf, type, size);
+ if (!*section)
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ return buf;
+
+ice_pkg_buf_alloc_single_section_err:
+ ice_pkg_buf_free(hw, buf);
+ return NULL;
+}
+
+/**
+ * ice_pkg_buf_get_active_sections
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of active sections. Before using the package buffer
+ * in an update package command, the caller should make sure that there is at
+ * least one active section - otherwise, the buffer is not legal and should
+ * not be used.
+ * Note: all package contents must be in Little Endian form.
+ */
+u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
+{
+ struct ice_buf_hdr *buf;
+
+ if (!bld)
+ return 0;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+ return LE16_TO_CPU(buf->section_count);
+}
+
+/**
+ * ice_pkg_buf
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Return a pointer to the buffer's header
+ */
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+{
+ if (bld)
+ return &bld->buf;
+
+ return NULL;
+}
+
+/**
+ * ice_find_buf_table
+ * @ice_seg: pointer to the ice segment
+ *
+ * Returns the address of the buffer table within the ice segment.
+ */
+struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
+{
+ struct ice_nvm_table *nvms;
+
+ nvms = (struct ice_nvm_table *)
+ (ice_seg->device_table +
+ LE32_TO_CPU(ice_seg->device_table_count));
+
+ return (_FORCE_ struct ice_buf_table *)
+ (nvms->vers + LE32_TO_CPU(nvms->table_count));
+}
+
+/**
+ * ice_pkg_val_buf
+ * @buf: pointer to the ice buffer
+ *
+ * This helper function validates a buffer's header.
+ */
+static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
+{
+ struct ice_buf_hdr *hdr;
+ u16 section_count;
+ u16 data_end;
+
+ hdr = (struct ice_buf_hdr *)buf->buf;
+ /* verify data */
+ section_count = LE16_TO_CPU(hdr->section_count);
+ if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
+ return NULL;
+
+ data_end = LE16_TO_CPU(hdr->data_end);
+ if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ return hdr;
+}
+
+/**
+ * ice_pkg_enum_buf
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This function will enumerate all the buffers in the ice segment. The first
+ * call is made with the ice_seg parameter non-NULL; on subsequent calls,
+ * ice_seg is set to NULL which continues the enumeration. When the function
+ * returns a NULL pointer, then the end of the buffers has been reached, or an
+ * unexpected value has been detected (for example an invalid section count or
+ * an invalid buffer end value).
+ */
+struct ice_buf_hdr *
+ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
+{
+ if (ice_seg) {
+ state->buf_table = ice_find_buf_table(ice_seg);
+ if (!state->buf_table)
+ return NULL;
+
+ state->buf_idx = 0;
+ return ice_pkg_val_buf(state->buf_table->buf_array);
+ }
+
+ if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
+ return ice_pkg_val_buf(state->buf_table->buf_array +
+ state->buf_idx);
+ else
+ return NULL;
+}
+
+/**
+ * ice_pkg_advance_sect
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This helper function will advance the section within the ice segment,
+ * also advancing the buffer if needed.
+ */
+bool
+ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
+{
+ if (!ice_seg && !state->buf)
+ return false;
+
+ if (!ice_seg && state->buf)
+ if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
+ return true;
+
+ state->buf = ice_pkg_enum_buf(ice_seg, state);
+ if (!state->buf)
+ return false;
+
+ /* start of new buffer, reset section index */
+ state->sect_idx = 0;
+ return true;
+}
+
+/**
+ * ice_pkg_enum_section
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ *
+ * This function will enumerate all the sections of a particular type in the
+ * ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the matching
+ * sections has been reached.
+ */
+void *
+ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type)
+{
+ u16 offset, size;
+
+ if (ice_seg)
+ state->type = sect_type;
+
+ if (!ice_pkg_advance_sect(ice_seg, state))
+ return NULL;
+
+ /* scan for next matching section */
+ while (state->buf->section_entry[state->sect_idx].type !=
+ CPU_TO_LE32(state->type))
+ if (!ice_pkg_advance_sect(NULL, state))
+ return NULL;
+
+ /* validate section */
+ offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
+ if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
+ return NULL;
+
+ size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
+ if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
+ return NULL;
+
+ /* make sure the section fits in the buffer */
+ if (offset + size > ICE_PKG_BUF_SIZE)
+ return NULL;
+
+ state->sect_type =
+ LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
+
+ /* calc pointer to this section */
+ state->sect = ((u8 *)state->buf) +
+ LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
+
+ return state->sect;
+}
+
+/**
+ * ice_pkg_enum_entry
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ * @offset: pointer to variable that receives the offset in the table (optional)
+ * @handler: function that handles access to the entries into the section type
+ *
+ * This function will enumerate all the entries in a particular section type in
+ * the ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the entries has
+ * been reached.
+ *
+ * Since each section may have a different header and entry size, the handler
+ * function is needed to determine the number and location of entries in each
+ * section.
+ *
+ * The offset parameter is optional, but should be used for sections that
+ * contain an offset for each section table. For such cases, the section handler
+ * function must return the appropriate offset + index to give the absolute
+ * offset for each entry. For example, if the base for a section's header
+ * indicates a base offset of 10, and the index for the entry is 2, then the
+ * section handler function should set the offset to 10 + 2 = 12.
+ */
+void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type, u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset))
+{
+ void *entry;
+
+ if (ice_seg) {
+ if (!handler)
+ return NULL;
+
+ if (!ice_pkg_enum_section(ice_seg, state, sect_type))
+ return NULL;
+
+ state->entry_idx = 0;
+ state->handler = handler;
+ } else {
+ state->entry_idx++;
+ }
+
+ if (!state->handler)
+ return NULL;
+
+ /* get entry */
+ entry = state->handler(state->sect_type, state->sect, state->entry_idx,
+ offset);
+ if (!entry) {
+ /* end of a section, look for another section of this type */
+ if (!ice_pkg_enum_section(NULL, state, 0))
+ return NULL;
+
+ state->entry_idx = 0;
+ entry = state->handler(state->sect_type, state->sect,
+ state->entry_idx, offset);
+ }
+
+ return entry;
+}
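+
+/* Handler shape sketch for the offset rule above (hypothetical section type
+ * and names; real handlers such as ice_sw_fv_handler() or the boost TCAM
+ * handler below follow the same pattern):
+ *
+ *	static void *
+ *	example_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+ *	{
+ *		struct example_section *sect = section;
+ *
+ *		if (!section || index >= LE16_TO_CPU(sect->count))
+ *			return NULL;
+ *		if (offset)
+ *			*offset = LE16_TO_CPU(sect->base_offset) + index;
+ *		return sect->entry + index;
+ *	}
+ */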
+
+/**
+ * ice_boost_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the boost TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual boost TCAM entries.
+ */
+static void *
+ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_boost_tcam_section *boost;
+
+ if (!section)
+ return NULL;
+
+ if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
+ return NULL;
+
+ if (index > ICE_MAX_BST_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ boost = (struct ice_boost_tcam_section *)section;
+ if (index >= LE16_TO_CPU(boost->count))
+ return NULL;
+
+ return boost->tcam + index;
+}
+
+/**
+ * ice_find_boost_entry
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @addr: Boost TCAM address of entry to search for
+ * @entry: returns pointer to the entry
+ *
+ * Finds a particular Boost TCAM entry and returns a pointer to that entry
+ * if it is found. The ice_seg parameter must not be NULL since the first call
+ * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
+ */
+static enum ice_status
+ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
+ struct ice_boost_tcam_entry **entry)
+{
+ struct ice_boost_tcam_entry *tcam;
+ struct ice_pkg_enum state;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!ice_seg)
+ return ICE_ERR_PARAM;
+
+ do {
+ tcam = (struct ice_boost_tcam_entry *)
+ ice_pkg_enum_entry(ice_seg, &state,
+ ICE_SID_RXPARSER_BOOST_TCAM, NULL,
+ ice_boost_tcam_handler);
+ if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
+ *entry = tcam;
+ return ICE_SUCCESS;
+ }
+
+ ice_seg = NULL;
+ } while (tcam);
+
+ *entry = NULL;
+ return ICE_ERR_CFG;
+}
+
+/**
+ * ice_init_pkg_hints
+ * @hw: pointer to the HW structure
+ * @ice_seg: pointer to the segment of the package scan (non-NULL)
+ *
+ * This function will scan the package and save off relevant information
+ * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
+ * since the first call to ice_enum_labels requires a pointer to an actual
+ * ice_seg structure.
+ */
+void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_pkg_enum state;
+ char *label_name;
+ u16 val;
+ int i;
+
+ ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!ice_seg)
+ return;
+
+ label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
+ &val);
+
+ while (label_name) {
+/* TODO: Replace !strncmp() with wrappers like match_some_pre() */
+ if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
+ /* check for a tunnel entry */
+ ice_add_tunnel_hint(hw, label_name, val);
+
+ /* check for a dvm mode entry */
+ else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
+ ice_add_dvm_hint(hw, val, true);
+
+ /* check for a svm mode entry */
+ else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
+ ice_add_dvm_hint(hw, val, false);
+
+ label_name = ice_enum_labels(NULL, 0, &state, &val);
+ }
+
+ /* Cache the appropriate boost TCAM entry pointers for tunnels */
+ for (i = 0; i < hw->tnl.count; i++) {
+ ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
+ &hw->tnl.tbl[i].boost_entry);
+ if (hw->tnl.tbl[i].boost_entry)
+ hw->tnl.tbl[i].valid = true;
+ }
+
+ /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
+ for (i = 0; i < hw->dvm_upd.count; i++)
+ ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
+ &hw->dvm_upd.tbl[i].boost_entry);
+}
+
+/**
+ * ice_acquire_global_cfg_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the global config lock for reading
+ * or writing of the package. When attempting to obtain write access, the
+ * caller must check for the following two return values:
+ *
+ * ICE_SUCCESS - Means the caller has acquired the global config lock
+ * and can perform writing of the package.
+ * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
+ * package or has found that no update was necessary; in
+ * this case, the caller can just skip performing any
+ * update of the package.
+ */
+enum ice_status
+ice_acquire_global_cfg_lock(struct ice_hw *hw,
+ enum ice_aq_res_access_type access)
+{
+ enum ice_status status;
+
+ status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
+ ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+
+ if (status == ICE_ERR_AQ_NO_WORK)
+ ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
+
+ return status;
+}
+
+/**
+ * ice_release_global_cfg_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the global config lock.
+ */
+void ice_release_global_cfg_lock(struct ice_hw *hw)
+{
+ ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
+}
+
+/**
+ * ice_acquire_change_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the change lock.
+ */
+enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
+{
+ return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
+ ICE_CHANGE_LOCK_TIMEOUT);
+}
+
+/**
+ * ice_release_change_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the change lock using the proper Admin Command.
+ */
+void ice_release_change_lock(struct ice_hw *hw)
+{
+ ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
+}
+
+/**
+ * ice_get_set_tx_topo - get or set Tx topology
+ * @hw: pointer to the HW struct
+ * @buf: pointer to Tx topology buffer
+ * @buf_size: buffer size
+ * @cd: pointer to command details structure or NULL
+ * @flags: pointer to descriptor flags
+ * @set: false to get the topology, true to set it
+ *
+ * The function will get or set the Tx topology.
+ */
+static enum ice_status
+ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
+ struct ice_sq_cd *cd, u8 *flags, bool set)
+{
+ struct ice_aqc_get_set_tx_topo *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.get_set_tx_topo;
+ if (set) {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
+ cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
+ /* requested to update a new topology, not a default topology */
+ if (buf)
+ cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
+ ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
+ } else {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
+ cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
+ }
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (status)
+ return status;
+ /* read the return flag values (first byte) for get operation */
+ if (!set && flags)
+ *flags = desc.params.get_set_tx_topo.set_flags;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_cfg_tx_topo - Initialize new tx topology if available
+ * @hw: pointer to the HW struct
+ * @buf: pointer to Tx topology buffer
+ * @len: buffer size
+ *
+ * The function will apply the new Tx topology from the package buffer
+ * if available.
+ */
+enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
+{
+ u8 *current_topo, *new_topo = NULL;
+ struct ice_run_time_cfg_seg *seg;
+ struct ice_buf_hdr *section;
+ struct ice_pkg_hdr *pkg_hdr;
+ enum ice_ddp_state state;
+ u16 i, size = 0, offset;
+ enum ice_status status;
+ u32 reg = 0;
+ u8 flags;
+
+ if (!buf || !len)
+ return ICE_ERR_PARAM;
+
+ /* Does FW support new Tx topology mode? */
+ if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
+ ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n");
+ return ICE_ERR_NOT_SUPPORTED;
+ }
+
+ current_topo = (u8 *)ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
+ if (!current_topo)
+ return ICE_ERR_NO_MEMORY;
+
+ /* get the current Tx topology */
+ status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
+ &flags, false);
+ ice_free(hw, current_topo);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Get current topology failed\n");
+ return status;
+ }
+
+ /* Is default topology already applied? */
+ if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == 9) {
+ ice_debug(hw, ICE_DBG_INIT, "Loaded default topology\n");
+ /* The default topology is already loaded */
+ return ICE_ERR_ALREADY_EXISTS;
+ }
+
+ /* Is new topology already applied? */
+ if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == 5) {
+ ice_debug(hw, ICE_DBG_INIT, "Loaded new topology\n");
+ /* The new topology is already loaded */
+ return ICE_ERR_ALREADY_EXISTS;
+ }
+
+ /* Has set topology already been issued? */
+ if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
+ ice_debug(hw, ICE_DBG_INIT, "Update Tx topology was done by another PF\n");
+ /* add a small delay (about 2 seconds) before exiting */
+ for (i = 0; i < 20; i++)
+ ice_msec_delay(100, true);
+ return ICE_ERR_ALREADY_EXISTS;
+ }
+
+ /* Change the topology from new to default (5 to 9) */
+ if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == 5) {
+ ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
+ goto update_topo;
+ }
+
+ pkg_hdr = (struct ice_pkg_hdr *)buf;
+ state = ice_verify_pkg(pkg_hdr, len);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
+ state);
+ return ICE_ERR_CFG;
+ }
+
+ /* find run time configuration segment */
+ seg = (struct ice_run_time_cfg_seg *)
+ ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
+ if (!seg) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
+ return ICE_ERR_CFG;
+ }
+
+ if (LE32_TO_CPU(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n",
+ LE32_TO_CPU(seg->buf_table.buf_count));
+ return ICE_ERR_CFG;
+ }
+
+ section = ice_pkg_val_buf(seg->buf_table.buf_array);
+
+ if (!section || LE32_TO_CPU(section->section_entry[0].type) !=
+ ICE_SID_TX_5_LAYER_TOPO) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
+ return ICE_ERR_CFG;
+ }
+
+ size = LE16_TO_CPU(section->section_entry[0].size);
+ offset = LE16_TO_CPU(section->section_entry[0].offset);
+ if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* make sure the section fits in the buffer */
+ if (offset + size > ICE_PKG_BUF_SIZE) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* Get the new topology buffer */
+ new_topo = ((u8 *)section) + offset;
+
+update_topo:
+ /* acquire the global lock to make sure that set topology is issued
+ * by only one PF
+ */
+ status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
+ ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
+ return status;
+ }
+
+ /* check whether a reset has already been triggered */
+ reg = rd32(hw, GLGEN_RSTAT);
+ if (reg & GLGEN_RSTAT_DEVSTATE_M) {
+ /* Reset is in progress, the HW will be re-initialized */
+ ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might already be applied\n");
+ ice_check_reset(hw);
+ return ICE_SUCCESS;
+ }
+
+ /* set new topology */
+ status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Set Tx topology failed\n");
+ return status;
+ }
+
+ /* new topology is updated, delay 1 second before issuing the CORER */
+ for (i = 0; i < 10; i++)
+ ice_msec_delay(100, true);
+ ice_reset(hw, ICE_RESET_CORER);
+ /* CORER will clear the global lock, so no explicit call
+ * required for release
+ */
+ return ICE_SUCCESS;
+}
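For clarity, the return codes above mean a caller of ice_cfg_tx_topo() can treat
both "already applied" outcomes as non-fatal. A minimal sketch, assuming the DDP
image is already held in "buf"/"len"; the wrapper name below is hypothetical and
not part of this patch:

static int example_apply_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
{
	enum ice_status status;

	status = ice_cfg_tx_topo(hw, buf, len);
	/* topology already loaded, or another PF already issued set topology */
	if (status == ICE_ERR_ALREADY_EXISTS)
		return 0;
	/* FW does not support the compatibility mode, keep the default */
	if (status == ICE_ERR_NOT_SUPPORTED)
		return 0;
	if (status)
		return -1;

	/* on success a CORER was issued (or a reset is already in progress),
	 * so the driver re-initializes the HW afterwards
	 */
	return 0;
}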
diff --git a/drivers/net/ice/base/ice_ddp.h b/drivers/net/ice/base/ice_ddp.h
new file mode 100644
index 0000000000..53bbbe2a5a
--- /dev/null
+++ b/drivers/net/ice/base/ice_ddp.h
@@ -0,0 +1,466 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#ifndef _ICE_DDP_H_
+#define _ICE_DDP_H_
+
+#include "ice_osdep.h"
+#include "ice_adminq_cmd.h"
+#include "ice_controlq.h"
+#include "ice_status.h"
+#include "ice_flex_type.h"
+#include "ice_protocol_type.h"
+
+/* Package minimal version supported */
+#define ICE_PKG_SUPP_VER_MAJ 1
+#define ICE_PKG_SUPP_VER_MNR 3
+
+/* Package format version */
+#define ICE_PKG_FMT_VER_MAJ 1
+#define ICE_PKG_FMT_VER_MNR 0
+#define ICE_PKG_FMT_VER_UPD 0
+#define ICE_PKG_FMT_VER_DFT 0
+
+#define ICE_PKG_CNT 4
+
+enum ice_ddp_state {
+ /* Indicates that this call to ice_init_pkg
+ * successfully loaded the requested DDP package
+ */
+ ICE_DDP_PKG_SUCCESS = 0,
+
+ /* Generic "already loaded" error; it is mapped later to one of the
+ * more specific errors below (one of the next 3)
+ */
+ ICE_DDP_PKG_ALREADY_LOADED = -1,
+
+ /* Indicates that a DDP package of the same version has already been
+ * loaded onto the device by a previous call or by another PF
+ */
+ ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
+
+ /* The device has a DDP package that is not supported by the driver */
+ ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
+
+ /* The device has a compatible package
+ * (but different from the request) already loaded
+ */
+ ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
+
+ /* The firmware loaded on the device is not compatible with
+ * the DDP package loaded
+ */
+ ICE_DDP_PKG_FW_MISMATCH = -5,
+
+ /* The DDP package file is invalid */
+ ICE_DDP_PKG_INVALID_FILE = -6,
+
+ /* The version of the DDP package provided is higher than
+ * the driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
+
+ /* The version of the DDP package provided is lower than the
+ * driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
+
+ /* Missing security manifest in DDP pkg */
+ ICE_DDP_PKG_NO_SEC_MANIFEST = -9,
+
+ /* The RSA signature of the DDP package file provided is invalid */
+ ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -10,
+
+ /* The DDP package file security revision is too low and not
+ * supported by firmware
+ */
+ ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW = -11,
+
+ /* Manifest hash mismatch */
+ ICE_DDP_PKG_MANIFEST_INVALID = -12,
+
+ /* Buffer hash mismatches manifest */
+ ICE_DDP_PKG_BUFFER_INVALID = -13,
+
+ /* Other errors */
+ ICE_DDP_PKG_ERR = -14,
+};
+
+/* Package and segment headers and tables */
+struct ice_pkg_hdr {
+ struct ice_pkg_ver pkg_format_ver;
+ __le32 seg_count;
+ __le32 seg_offset[STRUCT_HACK_VAR_LEN];
+};
+
+/* Package signing algorithm types */
+#define SEGMENT_SIGN_TYPE_INVALID 0x00000000
+#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001
+#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002
+#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 /* Secure Boot Block */
+
+/* generic segment */
+struct ice_generic_seg_hdr {
+#define SEGMENT_TYPE_INVALID 0x00000000
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_ICE_E810 0x00000010
+#define SEGMENT_TYPE_SIGNING 0x00001001
+#define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020
+ __le32 seg_type;
+ struct ice_pkg_ver seg_format_ver;
+ __le32 seg_size;
+ char seg_id[ICE_PKG_NAME_SIZE];
+};
+
+/* ice specific segment */
+
+union ice_device_id {
+ struct {
+ __le16 device_id;
+ __le16 vendor_id;
+ } dev_vend_id;
+ __le32 id;
+};
+
+struct ice_device_id_entry {
+ union ice_device_id device;
+ union ice_device_id sub_device;
+};
+
+struct ice_seg {
+ struct ice_generic_seg_hdr hdr;
+ __le32 device_table_count;
+ struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN];
+};
+
+struct ice_nvm_table {
+ __le32 table_count;
+ __le32 vers[STRUCT_HACK_VAR_LEN];
+};
+
+struct ice_buf {
+#define ICE_PKG_BUF_SIZE 4096
+ u8 buf[ICE_PKG_BUF_SIZE];
+};
+
+struct ice_buf_table {
+ __le32 buf_count;
+ struct ice_buf buf_array[STRUCT_HACK_VAR_LEN];
+};
+
+struct ice_run_time_cfg_seg {
+ struct ice_generic_seg_hdr hdr;
+ u8 rsvd[8];
+ struct ice_buf_table buf_table;
+};
+
+/* global metadata specific segment */
+struct ice_global_metadata_seg {
+ struct ice_generic_seg_hdr hdr;
+ struct ice_pkg_ver pkg_ver;
+ __le32 rsvd;
+ char pkg_name[ICE_PKG_NAME_SIZE];
+};
+
+#define ICE_MIN_S_OFF 12
+#define ICE_MAX_S_OFF 4095
+#define ICE_MIN_S_SZ 1
+#define ICE_MAX_S_SZ 4084
+
+struct ice_sign_seg {
+ struct ice_generic_seg_hdr hdr;
+ __le32 seg_id;
+ __le32 sign_type;
+ __le32 signed_seg_idx;
+ __le32 signed_buf_start;
+ __le32 signed_buf_count;
+#define ICE_SIGN_SEG_RESERVED_COUNT 44
+ u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT];
+ struct ice_buf_table buf_tbl;
+};
+
+/* section information */
+struct ice_section_entry {
+ __le32 type;
+ __le16 offset;
+ __le16 size;
+};
+
+#define ICE_MIN_S_COUNT 1
+#define ICE_MAX_S_COUNT 511
+#define ICE_MIN_S_DATA_END 12
+#define ICE_MAX_S_DATA_END 4096
+
+#define ICE_METADATA_BUF 0x80000000
+
+struct ice_buf_hdr {
+ __le16 section_count;
+ __le16 data_end;
+ struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN];
+};
+
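+/* Number of whole entries of size "ent_sz" that fit in one ICE_PKG_BUF_SIZE
+ * (4096 byte) buffer once the buffer header (including one section entry)
+ * and a section header of "hd_sz" bytes have been subtracted.
+ */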
+#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
+ ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
+ (ent_sz))
+
+/* ice package section IDs */
+#define ICE_SID_METADATA 1
+#define ICE_SID_XLT0_SW 10
+#define ICE_SID_XLT_KEY_BUILDER_SW 11
+#define ICE_SID_XLT1_SW 12
+#define ICE_SID_XLT2_SW 13
+#define ICE_SID_PROFID_TCAM_SW 14
+#define ICE_SID_PROFID_REDIR_SW 15
+#define ICE_SID_FLD_VEC_SW 16
+#define ICE_SID_CDID_KEY_BUILDER_SW 17
+#define ICE_SID_CDID_REDIR_SW 18
+
+#define ICE_SID_XLT0_ACL 20
+#define ICE_SID_XLT_KEY_BUILDER_ACL 21
+#define ICE_SID_XLT1_ACL 22
+#define ICE_SID_XLT2_ACL 23
+#define ICE_SID_PROFID_TCAM_ACL 24
+#define ICE_SID_PROFID_REDIR_ACL 25
+#define ICE_SID_FLD_VEC_ACL 26
+#define ICE_SID_CDID_KEY_BUILDER_ACL 27
+#define ICE_SID_CDID_REDIR_ACL 28
+
+#define ICE_SID_XLT0_FD 30
+#define ICE_SID_XLT_KEY_BUILDER_FD 31
+#define ICE_SID_XLT1_FD 32
+#define ICE_SID_XLT2_FD 33
+#define ICE_SID_PROFID_TCAM_FD 34
+#define ICE_SID_PROFID_REDIR_FD 35
+#define ICE_SID_FLD_VEC_FD 36
+#define ICE_SID_CDID_KEY_BUILDER_FD 37
+#define ICE_SID_CDID_REDIR_FD 38
+
+#define ICE_SID_XLT0_RSS 40
+#define ICE_SID_XLT_KEY_BUILDER_RSS 41
+#define ICE_SID_XLT1_RSS 42
+#define ICE_SID_XLT2_RSS 43
+#define ICE_SID_PROFID_TCAM_RSS 44
+#define ICE_SID_PROFID_REDIR_RSS 45
+#define ICE_SID_FLD_VEC_RSS 46
+#define ICE_SID_CDID_KEY_BUILDER_RSS 47
+#define ICE_SID_CDID_REDIR_RSS 48
+
+#define ICE_SID_RXPARSER_CAM 50
+#define ICE_SID_RXPARSER_NOMATCH_CAM 51
+#define ICE_SID_RXPARSER_IMEM 52
+#define ICE_SID_RXPARSER_XLT0_BUILDER 53
+#define ICE_SID_RXPARSER_NODE_PTYPE 54
+#define ICE_SID_RXPARSER_MARKER_PTYPE 55
+#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_RXPARSER_PROTO_GRP 57
+#define ICE_SID_RXPARSER_METADATA_INIT 58
+#define ICE_SID_RXPARSER_XLT0 59
+
+#define ICE_SID_TXPARSER_CAM 60
+#define ICE_SID_TXPARSER_NOMATCH_CAM 61
+#define ICE_SID_TXPARSER_IMEM 62
+#define ICE_SID_TXPARSER_XLT0_BUILDER 63
+#define ICE_SID_TXPARSER_NODE_PTYPE 64
+#define ICE_SID_TXPARSER_MARKER_PTYPE 65
+#define ICE_SID_TXPARSER_BOOST_TCAM 66
+#define ICE_SID_TXPARSER_PROTO_GRP 67
+#define ICE_SID_TXPARSER_METADATA_INIT 68
+#define ICE_SID_TXPARSER_XLT0 69
+
+#define ICE_SID_RXPARSER_INIT_REDIR 70
+#define ICE_SID_TXPARSER_INIT_REDIR 71
+#define ICE_SID_RXPARSER_MARKER_GRP 72
+#define ICE_SID_TXPARSER_MARKER_GRP 73
+#define ICE_SID_RXPARSER_LAST_PROTO 74
+#define ICE_SID_TXPARSER_LAST_PROTO 75
+#define ICE_SID_RXPARSER_PG_SPILL 76
+#define ICE_SID_TXPARSER_PG_SPILL 77
+#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
+#define ICE_SID_TXPARSER_NOMATCH_SPILL 79
+
+#define ICE_SID_XLT0_PE 80
+#define ICE_SID_XLT_KEY_BUILDER_PE 81
+#define ICE_SID_XLT1_PE 82
+#define ICE_SID_XLT2_PE 83
+#define ICE_SID_PROFID_TCAM_PE 84
+#define ICE_SID_PROFID_REDIR_PE 85
+#define ICE_SID_FLD_VEC_PE 86
+#define ICE_SID_CDID_KEY_BUILDER_PE 87
+#define ICE_SID_CDID_REDIR_PE 88
+
+#define ICE_SID_RXPARSER_FLAG_REDIR 97
+
+/* Label Metadata section IDs */
+#define ICE_SID_LBL_FIRST 0x80000010
+#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010
+#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011
+#define ICE_SID_LBL_RESERVED_12 0x80000012
+#define ICE_SID_LBL_RESERVED_13 0x80000013
+#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014
+#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015
+#define ICE_SID_LBL_PTYPE 0x80000016
+#define ICE_SID_LBL_PROTOCOL_ID 0x80000017
+#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
+#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019
+#define ICE_SID_LBL_RXPARSER_PG 0x8000001A
+#define ICE_SID_LBL_TXPARSER_PG 0x8000001B
+#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C
+#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D
+#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E
+#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F
+#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020
+#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021
+#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022
+#define ICE_SID_LBL_FLAG 0x80000023
+#define ICE_SID_LBL_REG 0x80000024
+#define ICE_SID_LBL_SW_PTG 0x80000025
+#define ICE_SID_LBL_ACL_PTG 0x80000026
+#define ICE_SID_LBL_PE_PTG 0x80000027
+#define ICE_SID_LBL_RSS_PTG 0x80000028
+#define ICE_SID_LBL_FD_PTG 0x80000029
+#define ICE_SID_LBL_SW_VSIG 0x8000002A
+#define ICE_SID_LBL_ACL_VSIG 0x8000002B
+#define ICE_SID_LBL_PE_VSIG 0x8000002C
+#define ICE_SID_LBL_RSS_VSIG 0x8000002D
+#define ICE_SID_LBL_FD_VSIG 0x8000002E
+#define ICE_SID_LBL_PTYPE_META 0x8000002F
+#define ICE_SID_LBL_SW_PROFID 0x80000030
+#define ICE_SID_LBL_ACL_PROFID 0x80000031
+#define ICE_SID_LBL_PE_PROFID 0x80000032
+#define ICE_SID_LBL_RSS_PROFID 0x80000033
+#define ICE_SID_LBL_FD_PROFID 0x80000034
+#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035
+#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036
+#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037
+#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038
+/* The following define MUST be updated to reflect the last label section ID */
+#define ICE_SID_LBL_LAST 0x80000038
+
+/* Label ICE runtime configuration section IDs */
+#define ICE_SID_TX_5_LAYER_TOPO 0x10
+
+enum ice_block {
+ ICE_BLK_SW = 0,
+ ICE_BLK_ACL,
+ ICE_BLK_FD,
+ ICE_BLK_RSS,
+ ICE_BLK_PE,
+ ICE_BLK_COUNT
+};
+
+enum ice_sect {
+ ICE_XLT0 = 0,
+ ICE_XLT_KB,
+ ICE_XLT1,
+ ICE_XLT2,
+ ICE_PROF_TCAM,
+ ICE_PROF_REDIR,
+ ICE_VEC_TBL,
+ ICE_CDID_KB,
+ ICE_CDID_REDIR,
+ ICE_SECT_COUNT
+};
+
+/* package buffer building */
+
+struct ice_buf_build {
+ struct ice_buf buf;
+ u16 reserved_section_table_entries;
+};
+
+struct ice_pkg_enum {
+ struct ice_buf_table *buf_table;
+ u32 buf_idx;
+
+ u32 type;
+ struct ice_buf_hdr *buf;
+ u32 sect_idx;
+ void *sect;
+ u32 sect_type;
+
+ u32 entry_idx;
+ void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
+};
+
+/* package Marker PType TCAM entry */
+struct ice_marker_ptype_tcam_entry {
+#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
+ __le16 addr;
+ __le16 ptype;
+ u8 keys[20];
+};
+
+struct ice_marker_ptype_tcam_section {
+ __le16 count;
+ __le16 reserved;
+ struct ice_marker_ptype_tcam_entry tcam[STRUCT_HACK_VAR_LEN];
+};
+
+#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
+ ice_struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \
+ sizeof(struct ice_marker_ptype_tcam_entry), \
+ sizeof(struct ice_marker_ptype_tcam_entry))
+
+struct ice_hw;
+
+enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
+void ice_release_change_lock(struct ice_hw *hw);
+
+struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
+void *
+ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
+enum ice_status
+ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
+ ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
+u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
+
+enum ice_status
+ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
+enum ice_status
+ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
+void ice_release_global_cfg_lock(struct ice_hw *hw);
+struct ice_generic_seg_hdr *
+ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+ struct ice_pkg_hdr *pkg_hdr);
+enum ice_ddp_state
+ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len);
+enum ice_ddp_state
+ice_get_pkg_info(struct ice_hw *hw);
+void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg);
+struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg);
+enum ice_status
+ice_acquire_global_cfg_lock(struct ice_hw *hw,
+ enum ice_aq_res_access_type access);
+
+struct ice_buf_hdr *
+ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state);
+bool
+ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state);
+void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type, u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset));
+void *
+ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type);
+enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
+enum ice_ddp_state
+ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
+bool ice_is_init_pkg_successful(enum ice_ddp_state state);
+void ice_free_seg(struct ice_hw *hw);
+
+struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+ void **section);
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
+
+enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
+
+#endif /* _ICE_DDP_H_ */
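The write-access contract documented for ice_acquire_global_cfg_lock() is easy to
get wrong, so here is a minimal sketch of the expected caller pattern; the
function name and the elided download step are illustrative only:

static enum ice_status example_write_pkg(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	/* another PF already wrote the package; nothing to do, no lock held */
	if (status == ICE_ERR_AQ_NO_WORK)
		return ICE_SUCCESS;
	if (status)
		return status;

	/* ... download/update the package here ... */

	ice_release_global_cfg_lock(hw);
	return ICE_SUCCESS;
}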
diff --git a/drivers/net/ice/base/ice_defs.h b/drivers/net/ice/base/ice_defs.h
new file mode 100644
index 0000000000..6e886f6aac
--- /dev/null
+++ b/drivers/net/ice/base/ice_defs.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2022 Intel Corporation
+ */
+
+#ifndef _ICE_DEFS_H_
+#define _ICE_DEFS_H_
+
+#define ETH_ALEN 6
+
+#define ETH_HEADER_LEN 14
+
+#define BIT(a) (1UL << (a))
+#define BIT_ULL(a) (1ULL << (a))
+
+#define BITS_PER_BYTE 8
+
+#define _FORCE_
+
+#define ICE_BYTES_PER_WORD 2
+#define ICE_BYTES_PER_DWORD 4
+#define ICE_MAX_TRAFFIC_CLASS 8
+
+/**
+ * ROUND_UP - round up to next arbitrary multiple (not a power of 2)
+ * @a: value to round up
+ * @b: arbitrary multiple
+ *
+ * Round up to the next multiple of the arbitrary value b.
+ * Note: when b is a power of 2, use ICE_ALIGN() instead.
+ */
+#define ROUND_UP(a, b) ((b) * DIVIDE_AND_ROUND_UP((a), (b)))
+
+#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b))
+
+#define IS_ASCII(_ch) ((_ch) < 0x80)
+
+#define STRUCT_HACK_VAR_LEN
+/**
+ * ice_struct_size - size of struct with C99 flexible array member
+ * @ptr: pointer to structure
+ * @field: flexible array member (last member of the structure)
+ * @num: number of elements of that flexible array member
+ */
+#define ice_struct_size(ptr, field, num) \
+ (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
+
+#define FLEX_ARRAY_SIZE(_ptr, _mem, cnt) ((cnt) * sizeof(_ptr->_mem[0]))
+
+#endif /* _ICE_DEFS_H_ */
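As a quick check of the two helpers above, a sketch assuming DIVIDE_AND_ROUND_UP()
is the usual ((a) + (b) - 1) / (b) ceiling-division macro from ice_osdep.h (not
shown in this patch), and using struct ice_buf_table from ice_ddp.h as an example
of a struct ending in a flexible array member:

#include <assert.h>

static void example_defs_check(void)
{
	struct ice_buf_table *t = NULL;

	/* round 10 up to the next multiple of 3: 3 * 4 = 12 */
	assert(ROUND_UP(10, 3) == 12);
	/* 12 is already a multiple of 3, so it stays 12 */
	assert(ROUND_UP(12, 3) == 12);

	/* ice_struct_size() simply spells out header size plus n entries */
	assert(ice_struct_size(t, buf_array, 2) ==
	       sizeof(*t) + 2 * sizeof(t->buf_array[0]));
}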
diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index 3918169001..a43d7ef76b 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -3,6 +3,7 @@
*/
#include "ice_common.h"
+#include "ice_ddp.h"
#include "ice_flex_pipe.h"
#include "ice_protocol_type.h"
#include "ice_flow.h"
@@ -106,2049 +107,224 @@ static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
}
/**
- * ice_pkg_val_buf
- * @buf: pointer to the ice buffer
- *
- * This helper function validates a buffer's header.
- */
-static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
-{
- struct ice_buf_hdr *hdr;
- u16 section_count;
- u16 data_end;
-
- hdr = (struct ice_buf_hdr *)buf->buf;
- /* verify data */
- section_count = LE16_TO_CPU(hdr->section_count);
- if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
- return NULL;
-
- data_end = LE16_TO_CPU(hdr->data_end);
- if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
- return NULL;
-
- return hdr;
-}
-
-/**
- * ice_find_buf_table
- * @ice_seg: pointer to the ice segment
- *
- * Returns the address of the buffer table within the ice segment.
- */
-static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
-{
- struct ice_nvm_table *nvms;
-
- nvms = (struct ice_nvm_table *)
- (ice_seg->device_table +
- LE32_TO_CPU(ice_seg->device_table_count));
-
- return (_FORCE_ struct ice_buf_table *)
- (nvms->vers + LE32_TO_CPU(nvms->table_count));
-}
-
-/**
- * ice_pkg_enum_buf
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- *
- * This function will enumerate all the buffers in the ice segment. The first
- * call is made with the ice_seg parameter non-NULL; on subsequent calls,
- * ice_seg is set to NULL which continues the enumeration. When the function
- * returns a NULL pointer, then the end of the buffers has been reached, or an
- * unexpected value has been detected (for example an invalid section count or
- * an invalid buffer end value).
- */
-static struct ice_buf_hdr *
-ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
-{
- if (ice_seg) {
- state->buf_table = ice_find_buf_table(ice_seg);
- if (!state->buf_table)
- return NULL;
-
- state->buf_idx = 0;
- return ice_pkg_val_buf(state->buf_table->buf_array);
- }
-
- if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
- return ice_pkg_val_buf(state->buf_table->buf_array +
- state->buf_idx);
- else
- return NULL;
-}
-
-/**
- * ice_pkg_advance_sect
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- *
- * This helper function will advance the section within the ice segment,
- * also advancing the buffer if needed.
- */
-static bool
-ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
-{
- if (!ice_seg && !state->buf)
- return false;
-
- if (!ice_seg && state->buf)
- if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
- return true;
-
- state->buf = ice_pkg_enum_buf(ice_seg, state);
- if (!state->buf)
- return false;
-
- /* start of new buffer, reset section index */
- state->sect_idx = 0;
- return true;
-}
-
-/**
- * ice_pkg_enum_section
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- * @sect_type: section type to enumerate
- *
- * This function will enumerate all the sections of a particular type in the
- * ice segment. The first call is made with the ice_seg parameter non-NULL;
- * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
- * When the function returns a NULL pointer, then the end of the matching
- * sections has been reached.
- */
-void *
-ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
- u32 sect_type)
-{
- u16 offset, size;
-
- if (ice_seg)
- state->type = sect_type;
-
- if (!ice_pkg_advance_sect(ice_seg, state))
- return NULL;
-
- /* scan for next matching section */
- while (state->buf->section_entry[state->sect_idx].type !=
- CPU_TO_LE32(state->type))
- if (!ice_pkg_advance_sect(NULL, state))
- return NULL;
-
- /* validate section */
- offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
- if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
- return NULL;
-
- size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
- if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
- return NULL;
-
- /* make sure the section fits in the buffer */
- if (offset + size > ICE_PKG_BUF_SIZE)
- return NULL;
-
- state->sect_type =
- LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
-
- /* calc pointer to this section */
- state->sect = ((u8 *)state->buf) +
- LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
-
- return state->sect;
-}
-
-/**
- * ice_pkg_enum_entry
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- * @sect_type: section type to enumerate
- * @offset: pointer to variable that receives the offset in the table (optional)
- * @handler: function that handles access to the entries into the section type
- *
- * This function will enumerate all the entries in particular section type in
- * the ice segment. The first call is made with the ice_seg parameter non-NULL;
- * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
- * When the function returns a NULL pointer, then the end of the entries has
- * been reached.
- *
- * Since each section may have a different header and entry size, the handler
- * function is needed to determine the number and location entries in each
- * section.
- *
- * The offset parameter is optional, but should be used for sections that
- * contain an offset for each section table. For such cases, the section handler
- * function must return the appropriate offset + index to give the absolution
- * offset for each entry. For example, if the base for a section's header
- * indicates a base offset of 10, and the index for the entry is 2, then
- * section handler function should set the offset to 10 + 2 = 12.
- */
-void *
-ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
- u32 sect_type, u32 *offset,
- void *(*handler)(u32 sect_type, void *section,
- u32 index, u32 *offset))
-{
- void *entry;
-
- if (ice_seg) {
- if (!handler)
- return NULL;
-
- if (!ice_pkg_enum_section(ice_seg, state, sect_type))
- return NULL;
-
- state->entry_idx = 0;
- state->handler = handler;
- } else {
- state->entry_idx++;
- }
-
- if (!state->handler)
- return NULL;
-
- /* get entry */
- entry = state->handler(state->sect_type, state->sect, state->entry_idx,
- offset);
- if (!entry) {
- /* end of a section, look for another section of this type */
- if (!ice_pkg_enum_section(NULL, state, 0))
- return NULL;
-
- state->entry_idx = 0;
- entry = state->handler(state->sect_type, state->sect,
- state->entry_idx, offset);
- }
-
- return entry;
-}
-
-/**
- * ice_hw_ptype_ena - check if the PTYPE is enabled or not
- * @hw: pointer to the HW structure
- * @ptype: the hardware PTYPE
- */
-bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
-{
- return ptype < ICE_FLOW_PTYPE_MAX &&
- ice_is_bit_set(hw->hw_ptype, ptype);
-}
-
-/**
- * ice_marker_ptype_tcam_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the Marker PType TCAM entry to be returned
- * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * Handles enumeration of individual Marker PType TCAM entries.
- */
-static void *
-ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index,
- u32 *offset)
-{
- struct ice_marker_ptype_tcam_section *marker_ptype;
-
- if (!section)
- return NULL;
-
- if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
- return NULL;
-
- if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
- return NULL;
-
- if (offset)
- *offset = 0;
-
- marker_ptype = (struct ice_marker_ptype_tcam_section *)section;
- if (index >= LE16_TO_CPU(marker_ptype->count))
- return NULL;
-
- return marker_ptype->tcam + index;
-}
-
-/**
- * ice_fill_hw_ptype - fill the enabled PTYPE bit information
- * @hw: pointer to the HW structure
- */
-static void
-ice_fill_hw_ptype(struct ice_hw *hw)
-{
- struct ice_marker_ptype_tcam_entry *tcam;
- struct ice_seg *seg = hw->seg;
- struct ice_pkg_enum state;
-
- ice_zero_bitmap(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
- if (!seg)
- return;
-
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
-
- do {
- tcam = (struct ice_marker_ptype_tcam_entry *)
- ice_pkg_enum_entry(seg, &state,
- ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
- ice_marker_ptype_tcam_handler);
- if (tcam &&
- LE16_TO_CPU(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
- LE16_TO_CPU(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
- ice_set_bit(LE16_TO_CPU(tcam->ptype), hw->hw_ptype);
-
- seg = NULL;
- } while (tcam);
-}
-
-/**
- * ice_boost_tcam_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the boost TCAM entry to be returned
- * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * Handles enumeration of individual boost TCAM entries.
- */
-static void *
-ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
-{
- struct ice_boost_tcam_section *boost;
-
- if (!section)
- return NULL;
-
- if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
- return NULL;
-
- if (index > ICE_MAX_BST_TCAMS_IN_BUF)
- return NULL;
-
- if (offset)
- *offset = 0;
-
- boost = (struct ice_boost_tcam_section *)section;
- if (index >= LE16_TO_CPU(boost->count))
- return NULL;
-
- return boost->tcam + index;
-}
-
-/**
- * ice_find_boost_entry
- * @ice_seg: pointer to the ice segment (non-NULL)
- * @addr: Boost TCAM address of entry to search for
- * @entry: returns pointer to the entry
- *
- * Finds a particular Boost TCAM entry and returns a pointer to that entry
- * if it is found. The ice_seg parameter must not be NULL since the first call
- * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
- */
-static enum ice_status
-ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
- struct ice_boost_tcam_entry **entry)
-{
- struct ice_boost_tcam_entry *tcam;
- struct ice_pkg_enum state;
-
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
-
- if (!ice_seg)
- return ICE_ERR_PARAM;
-
- do {
- tcam = (struct ice_boost_tcam_entry *)
- ice_pkg_enum_entry(ice_seg, &state,
- ICE_SID_RXPARSER_BOOST_TCAM, NULL,
- ice_boost_tcam_handler);
- if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
- *entry = tcam;
- return ICE_SUCCESS;
- }
-
- ice_seg = NULL;
- } while (tcam);
-
- *entry = NULL;
- return ICE_ERR_CFG;
-}
-
-/**
- * ice_label_enum_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the label entry to be returned
- * @offset: pointer to receive absolute offset, always zero for label sections
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * Handles enumeration of individual label entries.
- */
-static void *
-ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
- u32 *offset)
-{
- struct ice_label_section *labels;
-
- if (!section)
- return NULL;
-
- if (index > ICE_MAX_LABELS_IN_BUF)
- return NULL;
-
- if (offset)
- *offset = 0;
-
- labels = (struct ice_label_section *)section;
- if (index >= LE16_TO_CPU(labels->count))
- return NULL;
-
- return labels->label + index;
-}
-
-/**
- * ice_enum_labels
- * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
- * @type: the section type that will contain the label (0 on subsequent calls)
- * @state: ice_pkg_enum structure that will hold the state of the enumeration
- * @value: pointer to a value that will return the label's value if found
- *
- * Enumerates a list of labels in the package. The caller will call
- * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
- * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
- * the end of the list has been reached.
- */
-static char *
-ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
- u16 *value)
-{
- struct ice_label *label;
-
- /* Check for valid label section on first call */
- if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
- return NULL;
-
- label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
- NULL,
- ice_label_enum_handler);
- if (!label)
- return NULL;
-
- *value = LE16_TO_CPU(label->value);
- return label->name;
-}
-
-/**
- * ice_add_tunnel_hint
- * @hw: pointer to the HW structure
- * @label_name: label text
- * @val: value of the tunnel port boost entry
- */
-static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
-{
- if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
- u16 i;
-
- for (i = 0; tnls[i].type != TNL_LAST; i++) {
- size_t len = strlen(tnls[i].label_prefix);
-
- /* Look for matching label start, before continuing */
- if (strncmp(label_name, tnls[i].label_prefix, len))
- continue;
-
- /* Make sure this label matches our PF. Note that the PF
- * character ('0' - '7') will be located where our
- * prefix string's null terminator is located.
- */
- if ((label_name[len] - '0') == hw->pf_id) {
- hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
- hw->tnl.tbl[hw->tnl.count].valid = false;
- hw->tnl.tbl[hw->tnl.count].in_use = false;
- hw->tnl.tbl[hw->tnl.count].marked = false;
- hw->tnl.tbl[hw->tnl.count].boost_addr = val;
- hw->tnl.tbl[hw->tnl.count].port = 0;
- hw->tnl.count++;
- break;
- }
- }
- }
-}
-
-/**
- * ice_add_dvm_hint
- * @hw: pointer to the HW structure
- * @val: value of the boost entry
- * @enable: true if entry needs to be enabled, or false if needs to be disabled
- */
-static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
-{
- if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
- hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
- hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
- hw->dvm_upd.count++;
- }
-}
-
-/**
- * ice_init_pkg_hints
- * @hw: pointer to the HW structure
- * @ice_seg: pointer to the segment of the package scan (non-NULL)
- *
- * This function will scan the package and save off relevant information
- * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
- * since the first call to ice_enum_labels requires a pointer to an actual
- * ice_seg structure.
- */
-static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
-{
- struct ice_pkg_enum state;
- char *label_name;
- u16 val;
- int i;
-
- ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
-
- if (!ice_seg)
- return;
-
- label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
- &val);
-
- while (label_name) {
- if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
- /* check for a tunnel entry */
- ice_add_tunnel_hint(hw, label_name, val);
-
- /* check for a dvm mode entry */
- else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
- ice_add_dvm_hint(hw, val, true);
-
- /* check for a svm mode entry */
- else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
- ice_add_dvm_hint(hw, val, false);
-
- label_name = ice_enum_labels(NULL, 0, &state, &val);
- }
-
- /* Cache the appropriate boost TCAM entry pointers for tunnels */
- for (i = 0; i < hw->tnl.count; i++) {
- ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
- &hw->tnl.tbl[i].boost_entry);
- if (hw->tnl.tbl[i].boost_entry)
- hw->tnl.tbl[i].valid = true;
- }
-
- /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
- for (i = 0; i < hw->dvm_upd.count; i++)
- ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
- &hw->dvm_upd.tbl[i].boost_entry);
-}
-
-/* Key creation */
-
-#define ICE_DC_KEY 0x1 /* don't care */
-#define ICE_DC_KEYINV 0x1
-#define ICE_NM_KEY 0x0 /* never match */
-#define ICE_NM_KEYINV 0x0
-#define ICE_0_KEY 0x1 /* match 0 */
-#define ICE_0_KEYINV 0x0
-#define ICE_1_KEY 0x0 /* match 1 */
-#define ICE_1_KEYINV 0x1
-
-/**
- * ice_gen_key_word - generate 16-bits of a key/mask word
- * @val: the value
- * @valid: valid bits mask (change only the valid bits)
- * @dont_care: don't care mask
- * @nvr_mtch: never match mask
- * @key: pointer to an array of where the resulting key portion
- * @key_inv: pointer to an array of where the resulting key invert portion
- *
- * This function generates 16-bits from a 8-bit value, an 8-bit don't care mask
- * and an 8-bit never match mask. The 16-bits of output are divided into 8 bits
- * of key and 8 bits of key invert.
- *
- * '0' = b01, always match a 0 bit
- * '1' = b10, always match a 1 bit
- * '?' = b11, don't care bit (always matches)
- * '~' = b00, never match bit
- *
- * Input:
- * val: b0 1 0 1 0 1
- * dont_care: b0 0 1 1 0 0
- * never_mtch: b0 0 0 0 1 1
- * ------------------------------
- * Result: key: b01 10 11 11 00 00
- */
-static enum ice_status
-ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
- u8 *key_inv)
-{
- u8 in_key = *key, in_key_inv = *key_inv;
- u8 i;
-
- /* 'dont_care' and 'nvr_mtch' masks cannot overlap */
- if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
- return ICE_ERR_CFG;
-
- *key = 0;
- *key_inv = 0;
-
- /* encode the 8 bits into 8-bit key and 8-bit key invert */
- for (i = 0; i < 8; i++) {
- *key >>= 1;
- *key_inv >>= 1;
-
- if (!(valid & 0x1)) { /* change only valid bits */
- *key |= (in_key & 0x1) << 7;
- *key_inv |= (in_key_inv & 0x1) << 7;
- } else if (dont_care & 0x1) { /* don't care bit */
- *key |= ICE_DC_KEY << 7;
- *key_inv |= ICE_DC_KEYINV << 7;
- } else if (nvr_mtch & 0x1) { /* never match bit */
- *key |= ICE_NM_KEY << 7;
- *key_inv |= ICE_NM_KEYINV << 7;
- } else if (val & 0x01) { /* exact 1 match */
- *key |= ICE_1_KEY << 7;
- *key_inv |= ICE_1_KEYINV << 7;
- } else { /* exact 0 match */
- *key |= ICE_0_KEY << 7;
- *key_inv |= ICE_0_KEYINV << 7;
- }
-
- dont_care >>= 1;
- nvr_mtch >>= 1;
- valid >>= 1;
- val >>= 1;
- in_key >>= 1;
- in_key_inv >>= 1;
- }
-
- return ICE_SUCCESS;
-}
-
-/**
- * ice_bits_max_set - determine if the number of bits set is within a maximum
- * @mask: pointer to the byte array which is the mask
- * @size: the number of bytes in the mask
- * @max: the max number of set bits
- *
- * This function determines if there are at most 'max' number of bits set in an
- * array. Returns true if the number for bits set is <= max or will return false
- * otherwise.
- */
-static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
-{
- u16 count = 0;
- u16 i;
-
- /* check each byte */
- for (i = 0; i < size; i++) {
- /* if 0, go to next byte */
- if (!mask[i])
- continue;
-
- /* We know there is at least one set bit in this byte because of
- * the above check; if we already have found 'max' number of
- * bits set, then we can return failure now.
- */
- if (count == max)
- return false;
-
- /* count the bits in this byte, checking threshold */
- count += ice_hweight8(mask[i]);
- if (count > max)
- return false;
- }
-
- return true;
-}
-
-/**
- * ice_set_key - generate a variable sized key with multiples of 16-bits
- * @key: pointer to where the key will be stored
- * @size: the size of the complete key in bytes (must be even)
- * @val: array of 8-bit values that makes up the value portion of the key
- * @upd: array of 8-bit masks that determine what key portion to update
- * @dc: array of 8-bit masks that make up the don't care mask
- * @nm: array of 8-bit masks that make up the never match mask
- * @off: the offset of the first byte in the key to update
- * @len: the number of bytes in the key update
- *
- * This function generates a key from a value, a don't care mask and a never
- * match mask.
- * upd, dc, and nm are optional parameters, and can be NULL:
- * upd == NULL --> upd mask is all 1's (update all bits)
- * dc == NULL --> dc mask is all 0's (no don't care bits)
- * nm == NULL --> nm mask is all 0's (no never match bits)
- */
-enum ice_status
-ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
- u16 len)
-{
- u16 half_size;
- u16 i;
-
- /* size must be a multiple of 2 bytes. */
- if (size % 2)
- return ICE_ERR_CFG;
- half_size = size / 2;
-
- if (off + len > half_size)
- return ICE_ERR_CFG;
-
- /* Make sure at most one bit is set in the never match mask. Having more
- * than one never match mask bit set will cause HW to consume excessive
- * power otherwise; this is a power management efficiency check.
- */
-#define ICE_NVR_MTCH_BITS_MAX 1
- if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
- return ICE_ERR_CFG;
-
- for (i = 0; i < len; i++)
- if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
- dc ? dc[i] : 0, nm ? nm[i] : 0,
- key + off + i, key + half_size + off + i))
- return ICE_ERR_CFG;
-
- return ICE_SUCCESS;
-}
-
-/**
- * ice_acquire_global_cfg_lock
- * @hw: pointer to the HW structure
- * @access: access type (read or write)
- *
- * This function will request ownership of the global config lock for reading
- * or writing of the package. When attempting to obtain write access, the
- * caller must check for the following two return values:
- *
- * ICE_SUCCESS - Means the caller has acquired the global config lock
- * and can perform writing of the package.
- * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
- * package or has found that no update was necessary; in
- * this case, the caller can just skip performing any
- * update of the package.
- */
-static enum ice_status
-ice_acquire_global_cfg_lock(struct ice_hw *hw,
- enum ice_aq_res_access_type access)
-{
- enum ice_status status;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
- status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
- ICE_GLOBAL_CFG_LOCK_TIMEOUT);
-
- if (status == ICE_ERR_AQ_NO_WORK)
- ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
-
- return status;
-}
-
-/**
- * ice_release_global_cfg_lock
- * @hw: pointer to the HW structure
- *
- * This function will release the global config lock.
- */
-static void ice_release_global_cfg_lock(struct ice_hw *hw)
-{
- ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
-}
-
-/**
- * ice_acquire_change_lock
- * @hw: pointer to the HW structure
- * @access: access type (read or write)
- *
- * This function will request ownership of the change lock.
- */
-enum ice_status
-ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
-{
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
- return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
- ICE_CHANGE_LOCK_TIMEOUT);
-}
-
-/**
- * ice_release_change_lock
- * @hw: pointer to the HW structure
- *
- * This function will release the change lock using the proper Admin Command.
- */
-void ice_release_change_lock(struct ice_hw *hw)
-{
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
- ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
-}
-
-/**
- * ice_aq_download_pkg
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package buffer to transfer
- * @buf_size: the size of the package buffer
- * @last_buf: last buffer indicator
- * @error_offset: returns error offset
- * @error_info: returns error information
- * @cd: pointer to command details structure or NULL
- *
- * Download Package (0x0C40)
- */
-static enum ice_status
-ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, bool last_buf, u32 *error_offset,
- u32 *error_info, struct ice_sq_cd *cd)
-{
- struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
- enum ice_status status;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
- if (error_offset)
- *error_offset = 0;
- if (error_info)
- *error_info = 0;
-
- cmd = &desc.params.download_pkg;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
- desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
-
- if (last_buf)
- cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
-
- status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == ICE_ERR_AQ_ERROR) {
- /* Read error from buffer only when the FW returned an error */
- struct ice_aqc_download_pkg_resp *resp;
-
- resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
- if (error_offset)
- *error_offset = LE32_TO_CPU(resp->error_offset);
- if (error_info)
- *error_info = LE32_TO_CPU(resp->error_info);
- }
-
- return status;
-}
-
-/**
- * ice_aq_upload_section
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package buffer which will receive the section
- * @buf_size: the size of the package buffer
- * @cd: pointer to command details structure or NULL
- *
- * Upload Section (0x0C41)
- */
-enum ice_status
-ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, struct ice_sq_cd *cd)
-{
- struct ice_aq_desc desc;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
- desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
-
- return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
-}
-
-/**
- * ice_aq_update_pkg
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package cmd buffer
- * @buf_size: the size of the package cmd buffer
- * @last_buf: last buffer indicator
- * @error_offset: returns error offset
- * @error_info: returns error information
- * @cd: pointer to command details structure or NULL
- *
- * Update Package (0x0C42)
- */
-static enum ice_status
-ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
- bool last_buf, u32 *error_offset, u32 *error_info,
- struct ice_sq_cd *cd)
-{
- struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
- enum ice_status status;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
- if (error_offset)
- *error_offset = 0;
- if (error_info)
- *error_info = 0;
-
- cmd = &desc.params.download_pkg;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
- desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
-
- if (last_buf)
- cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
-
- status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == ICE_ERR_AQ_ERROR) {
- /* Read error from buffer only when the FW returned an error */
- struct ice_aqc_download_pkg_resp *resp;
-
- resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
- if (error_offset)
- *error_offset = LE32_TO_CPU(resp->error_offset);
- if (error_info)
- *error_info = LE32_TO_CPU(resp->error_info);
- }
-
- return status;
-}
-
-/**
- * ice_find_seg_in_pkg
- * @hw: pointer to the hardware structure
- * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
- * @pkg_hdr: pointer to the package header to be searched
- *
- * This function searches a package file for a particular segment type. On
- * success it returns a pointer to the segment header, otherwise it will
- * return NULL.
- */
-static struct ice_generic_seg_hdr *
-ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
- struct ice_pkg_hdr *pkg_hdr)
-{
- u32 i;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
- pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
- pkg_hdr->pkg_format_ver.update,
- pkg_hdr->pkg_format_ver.draft);
-
- /* Search all package segments for the requested segment type */
- for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
- struct ice_generic_seg_hdr *seg;
-
- seg = (struct ice_generic_seg_hdr *)
- ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
-
- if (LE32_TO_CPU(seg->seg_type) == seg_type)
- return seg;
- }
-
- return NULL;
-}
-
-/**
- * ice_update_pkg_no_lock
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
- */
-static enum ice_status
-ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
-{
- enum ice_status status = ICE_SUCCESS;
- u32 i;
-
- for (i = 0; i < count; i++) {
- struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
- bool last = ((i + 1) == count);
- u32 offset, info;
-
- status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
- last, &offset, &info, NULL);
-
- if (status) {
- ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
- status, offset, info);
- break;
- }
- }
-
- return status;
-}
-
-/**
- * ice_update_pkg
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
- *
- * Obtains change lock and updates package.
- */
-enum ice_status
-ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
-{
- enum ice_status status;
-
- status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
- if (status)
- return status;
-
- status = ice_update_pkg_no_lock(hw, bufs, count);
-
- ice_release_change_lock(hw);
-
- return status;
-}
-
-/**
- * ice_dwnld_cfg_bufs
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
- *
- * Obtains global config lock and downloads the package configuration buffers
- * to the firmware. Metadata buffers are skipped, and the first metadata buffer
- * found indicates that the rest of the buffers are all metadata buffers.
- */
-static enum ice_status
-ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
-{
- enum ice_status status;
- struct ice_buf_hdr *bh;
- u32 offset, info, i;
-
- if (!bufs || !count)
- return ICE_ERR_PARAM;
-
- /* If the first buffer's first section has its metadata bit set
- * then there are no buffers to be downloaded, and the operation is
- * considered a success.
- */
- bh = (struct ice_buf_hdr *)bufs;
- if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
- return ICE_SUCCESS;
-
- /* reset pkg_dwnld_status in case this function is called in the
- * reset/rebuild flow
- */
- hw->pkg_dwnld_status = ICE_AQ_RC_OK;
-
- status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
- if (status) {
- if (status == ICE_ERR_AQ_NO_WORK)
- hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
- else
- hw->pkg_dwnld_status = hw->adminq.sq_last_status;
- return status;
- }
-
- for (i = 0; i < count; i++) {
- bool last = ((i + 1) == count);
-
- if (!last) {
- /* check next buffer for metadata flag */
- bh = (struct ice_buf_hdr *)(bufs + i + 1);
-
- /* A set metadata flag in the next buffer will signal
- * that the current buffer will be the last buffer
- * downloaded
- */
- if (LE16_TO_CPU(bh->section_count))
- if (LE32_TO_CPU(bh->section_entry[0].type) &
- ICE_METADATA_BUF)
- last = true;
- }
-
- bh = (struct ice_buf_hdr *)(bufs + i);
-
- status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
- &offset, &info, NULL);
-
- /* Save AQ status from download package */
- hw->pkg_dwnld_status = hw->adminq.sq_last_status;
- if (status) {
- ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
- status, offset, info);
- break;
- }
-
- if (last)
- break;
- }
-
- if (!status) {
- status = ice_set_vlan_mode(hw);
- if (status)
- ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
- status);
- }
-
- ice_release_global_cfg_lock(hw);
-
- return status;
-}
-
-/**
- * ice_aq_get_pkg_info_list
- * @hw: pointer to the hardware structure
- * @pkg_info: the buffer which will receive the information list
- * @buf_size: the size of the pkg_info information buffer
- * @cd: pointer to command details structure or NULL
- *
- * Get Package Info List (0x0C43)
- */
-static enum ice_status
-ice_aq_get_pkg_info_list(struct ice_hw *hw,
- struct ice_aqc_get_pkg_info_resp *pkg_info,
- u16 buf_size, struct ice_sq_cd *cd)
-{
- struct ice_aq_desc desc;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
-
- return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
-}
-
-/**
- * ice_download_pkg
- * @hw: pointer to the hardware structure
- * @ice_seg: pointer to the segment of the package to be downloaded
- *
- * Handles the download of a complete package.
- */
-static enum ice_status
-ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
-{
- struct ice_buf_table *ice_buf_tbl;
- enum ice_status status;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
- ice_seg->hdr.seg_format_ver.major,
- ice_seg->hdr.seg_format_ver.minor,
- ice_seg->hdr.seg_format_ver.update,
- ice_seg->hdr.seg_format_ver.draft);
-
- ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
- LE32_TO_CPU(ice_seg->hdr.seg_type),
- LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
-
- ice_buf_tbl = ice_find_buf_table(ice_seg);
-
- ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
- LE32_TO_CPU(ice_buf_tbl->buf_count));
-
- status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
- LE32_TO_CPU(ice_buf_tbl->buf_count));
-
- ice_post_pkg_dwnld_vlan_mode_cfg(hw);
-
- return status;
-}
-
-/**
- * ice_init_pkg_info
- * @hw: pointer to the hardware structure
- * @pkg_hdr: pointer to the driver's package hdr
- *
- * Saves off the package details into the HW structure.
- */
-static enum ice_status
-ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
-{
- struct ice_generic_seg_hdr *seg_hdr;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- if (!pkg_hdr)
- return ICE_ERR_PARAM;
-
- hw->pkg_seg_id = SEGMENT_TYPE_ICE_E810;
-
- ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
- hw->pkg_seg_id);
-
- seg_hdr = (struct ice_generic_seg_hdr *)
- ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
- if (seg_hdr) {
- struct ice_meta_sect *meta;
- struct ice_pkg_enum state;
-
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
-
- /* Get package information from the Metadata Section */
- meta = (struct ice_meta_sect *)
- ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
- ICE_SID_METADATA);
- if (!meta) {
- ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
- return ICE_ERR_CFG;
- }
-
- hw->pkg_ver = meta->ver;
- ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
- ICE_NONDMA_TO_NONDMA);
-
- ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
- meta->ver.major, meta->ver.minor, meta->ver.update,
- meta->ver.draft, meta->name);
-
- hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
- ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
- sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);
-
- ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
- seg_hdr->seg_format_ver.major,
- seg_hdr->seg_format_ver.minor,
- seg_hdr->seg_format_ver.update,
- seg_hdr->seg_format_ver.draft,
- seg_hdr->seg_id);
- } else {
- ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
- return ICE_ERR_CFG;
- }
-
- return ICE_SUCCESS;
-}
-
-/**
- * ice_get_pkg_info
- * @hw: pointer to the hardware structure
- *
- * Store details of the package currently loaded in HW into the HW structure.
- */
-static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
-{
- struct ice_aqc_get_pkg_info_resp *pkg_info;
- enum ice_status status;
- u16 size;
- u32 i;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
- size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
- pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
- if (!pkg_info)
- return ICE_ERR_NO_MEMORY;
-
- status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
- if (status)
- goto init_pkg_free_alloc;
-
- for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
-#define ICE_PKG_FLAG_COUNT 4
- char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
- u8 place = 0;
-
- if (pkg_info->pkg_info[i].is_active) {
- flags[place++] = 'A';
- hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
- hw->active_track_id =
- LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
- ice_memcpy(hw->active_pkg_name,
- pkg_info->pkg_info[i].name,
- sizeof(pkg_info->pkg_info[i].name),
- ICE_NONDMA_TO_NONDMA);
- hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
- }
- if (pkg_info->pkg_info[i].is_active_at_boot)
- flags[place++] = 'B';
- if (pkg_info->pkg_info[i].is_modified)
- flags[place++] = 'M';
- if (pkg_info->pkg_info[i].is_in_nvm)
- flags[place++] = 'N';
-
- ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
- i, pkg_info->pkg_info[i].ver.major,
- pkg_info->pkg_info[i].ver.minor,
- pkg_info->pkg_info[i].ver.update,
- pkg_info->pkg_info[i].ver.draft,
- pkg_info->pkg_info[i].name, flags);
- }
-
-init_pkg_free_alloc:
- ice_free(hw, pkg_info);
-
- return status;
-}
-
-/**
- * ice_verify_pkg - verify package
- * @pkg: pointer to the package buffer
- * @len: size of the package buffer
- *
- * Verifies various attributes of the package file, including length, format
- * version, and the requirement of at least one segment.
- */
-static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
-{
- u32 seg_count;
- u32 i;
-
- if (len < ice_struct_size(pkg, seg_offset, 1))
- return ICE_ERR_BUF_TOO_SHORT;
-
- if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
- pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
- pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
- pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
- return ICE_ERR_CFG;
-
- /* pkg must have at least one segment */
- seg_count = LE32_TO_CPU(pkg->seg_count);
- if (seg_count < 1)
- return ICE_ERR_CFG;
-
- /* make sure segment array fits in package length */
- if (len < ice_struct_size(pkg, seg_offset, seg_count))
- return ICE_ERR_BUF_TOO_SHORT;
-
- /* all segments must fit within length */
- for (i = 0; i < seg_count; i++) {
- u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
- struct ice_generic_seg_hdr *seg;
-
- /* segment header must fit */
- if (len < off + sizeof(*seg))
- return ICE_ERR_BUF_TOO_SHORT;
-
- seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
-
- /* segment body must fit */
- if (len < off + LE32_TO_CPU(seg->seg_size))
- return ICE_ERR_BUF_TOO_SHORT;
- }
-
- return ICE_SUCCESS;
-}
-
-/**
- * ice_free_seg - free package segment pointer
- * @hw: pointer to the hardware structure
- *
- * Frees the package segment pointer in the proper manner, depending on if the
- * segment was allocated or just the passed in pointer was stored.
- */
-void ice_free_seg(struct ice_hw *hw)
-{
- if (hw->pkg_copy) {
- ice_free(hw, hw->pkg_copy);
- hw->pkg_copy = NULL;
- hw->pkg_size = 0;
- }
- hw->seg = NULL;
-}
-
-/**
- * ice_init_pkg_regs - initialize additional package registers
- * @hw: pointer to the hardware structure
- */
-static void ice_init_pkg_regs(struct ice_hw *hw)
-{
-#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
-#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
-#define ICE_SW_BLK_IDX 0
- if (hw->dcf_enabled)
- return;
-
- /* setup Switch block input mask, which is 48-bits in two parts */
- wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
- wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
-}
-
-/**
- * ice_chk_pkg_version - check package version for compatibility with driver
- * @pkg_ver: pointer to a version structure to check
- *
- * Check to make sure that the package about to be downloaded is compatible with
- * the driver. To be compatible, the major and minor components of the package
- * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
- * definitions.
- */
-static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
-{
- if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
- pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
- return ICE_ERR_NOT_SUPPORTED;
-
- return ICE_SUCCESS;
-}
-
-/**
- * ice_chk_pkg_compat
- * @hw: pointer to the hardware structure
- * @ospkg: pointer to the package hdr
- * @seg: pointer to the package segment hdr
- *
- * This function checks the package version compatibility with driver and NVM
- */
-static enum ice_status
-ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
- struct ice_seg **seg)
-{
- struct ice_aqc_get_pkg_info_resp *pkg;
- enum ice_status status;
- u16 size;
- u32 i;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
- /* Check package version compatibility */
- status = ice_chk_pkg_version(&hw->pkg_ver);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
- return status;
- }
-
- /* find ICE segment in given package */
- *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
- ospkg);
- if (!*seg) {
- ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
- return ICE_ERR_CFG;
- }
-
- /* Check if FW is compatible with the OS package */
- size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
- pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
- if (!pkg)
- return ICE_ERR_NO_MEMORY;
-
- status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
- if (status)
- goto fw_ddp_compat_free_alloc;
-
- for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
- /* loop till we find the NVM package */
- if (!pkg->pkg_info[i].is_in_nvm)
- continue;
- if ((*seg)->hdr.seg_format_ver.major !=
- pkg->pkg_info[i].ver.major ||
- (*seg)->hdr.seg_format_ver.minor >
- pkg->pkg_info[i].ver.minor) {
- status = ICE_ERR_FW_DDP_MISMATCH;
- ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
- }
- /* done processing NVM package so break */
- break;
- }
-fw_ddp_compat_free_alloc:
- ice_free(hw, pkg);
- return status;
-}
-
-/**
- * ice_sw_fv_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the field vector entry to be returned
- * @offset: ptr to variable that receives the offset in the field vector table
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * This function treats the given section as of type ice_sw_fv_section and
- * enumerates offset field. "offset" is an index into the field vector table.
- */
-static void *
-ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
-{
- struct ice_sw_fv_section *fv_section =
- (struct ice_sw_fv_section *)section;
-
- if (!section || sect_type != ICE_SID_FLD_VEC_SW)
- return NULL;
- if (index >= LE16_TO_CPU(fv_section->count))
- return NULL;
- if (offset)
- /* "index" passed in to this function is relative to a given
- * 4k block. To get to the true index into the field vector
- * table need to add the relative index to the base_offset
- * field of this section
- */
- *offset = LE16_TO_CPU(fv_section->base_offset) + index;
- return fv_section->fv + index;
-}
-
-/**
- * ice_get_prof_index_max - get the max profile index for used profile
- * @hw: pointer to the HW struct
- *
- * Calling this function will get the max profile index for used profile
- * and store the index number in struct ice_switch_info *switch_info
- * in hw for following use.
- */
-static int ice_get_prof_index_max(struct ice_hw *hw)
-{
- u16 prof_index = 0, j, max_prof_index = 0;
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- bool flag = false;
- struct ice_fv *fv;
- u32 offset;
-
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
-
- if (!hw->seg)
- return ICE_ERR_PARAM;
-
- ice_seg = hw->seg;
-
- do {
- fv = (struct ice_fv *)
- ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &offset, ice_sw_fv_handler);
- if (!fv)
- break;
- ice_seg = NULL;
-
- /* in the profile that not be used, the prot_id is set to 0xff
- * and the off is set to 0x1ff for all the field vectors.
- */
- for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
- fv->ew[j].off != ICE_FV_OFFSET_INVAL)
- flag = true;
- if (flag && prof_index > max_prof_index)
- max_prof_index = prof_index;
-
- prof_index++;
- flag = false;
- } while (fv);
-
- hw->switch_info->max_used_prof_index = max_prof_index;
-
- return ICE_SUCCESS;
-}
-
-/**
- * ice_init_pkg - initialize/download package
- * @hw: pointer to the hardware structure
- * @buf: pointer to the package buffer
- * @len: size of the package buffer
- *
- * This function initializes a package. The package contains HW tables
- * required to do packet processing. First, the function extracts package
- * information such as version. Then it finds the ice configuration segment
- * within the package; this function then saves a copy of the segment pointer
- * within the supplied package buffer. Next, the function will cache any hints
- * from the package, followed by downloading the package itself. Note, that if
- * a previous PF driver has already downloaded the package successfully, then
- * the current driver will not have to download the package again.
- *
- * The local package contents will be used to query default behavior and to
- * update specific sections of the HW's version of the package (e.g. to update
- * the parse graph to understand new protocols).
- *
- * This function stores a pointer to the package buffer memory, and it is
- * expected that the supplied buffer will not be freed immediately. If the
- * package buffer needs to be freed, such as when read from a file, use
- * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
- * case.
- */
-enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
-{
- struct ice_pkg_hdr *pkg;
- enum ice_status status;
- struct ice_seg *seg;
-
- if (!buf || !len)
- return ICE_ERR_PARAM;
-
- pkg = (struct ice_pkg_hdr *)buf;
- status = ice_verify_pkg(pkg, len);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
- status);
- return status;
- }
-
- /* initialize package info */
- status = ice_init_pkg_info(hw, pkg);
- if (status)
- return status;
-
- /* before downloading the package, check package version for
- * compatibility with driver
- */
- status = ice_chk_pkg_compat(hw, pkg, &seg);
- if (status)
- return status;
-
- /* initialize package hints and then download package */
- ice_init_pkg_hints(hw, seg);
- status = ice_download_pkg(hw, seg);
- if (status == ICE_ERR_AQ_NO_WORK) {
- ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
- status = ICE_SUCCESS;
- }
-
- /* Get information on the package currently loaded in HW, then make sure
- * the driver is compatible with this version.
- */
- if (!status) {
- status = ice_get_pkg_info(hw);
- if (!status)
- status = ice_chk_pkg_version(&hw->active_pkg_ver);
- }
-
- if (!status) {
- hw->seg = seg;
- /* on successful package download update other required
- * registers to support the package and fill HW tables
- * with package content.
- */
- ice_init_pkg_regs(hw);
- ice_fill_blk_tbls(hw);
- ice_fill_hw_ptype(hw);
- ice_get_prof_index_max(hw);
- } else {
- ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
- status);
- }
-
- return status;
-}
-
-/**
- * ice_copy_and_init_pkg - initialize/download a copy of the package
- * @hw: pointer to the hardware structure
- * @buf: pointer to the package buffer
- * @len: size of the package buffer
- *
- * This function copies the package buffer, and then calls ice_init_pkg() to
- * initialize the copied package contents.
- *
- * The copying is necessary if the package buffer supplied is constant, or if
- * the memory may disappear shortly after calling this function.
- *
- * If the package buffer resides in the data segment and can be modified, the
- * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
- *
- * However, if the package buffer needs to be copied first, such as when being
- * read from a file, the caller should use ice_copy_and_init_pkg().
- *
- * This function will first copy the package buffer, before calling
- * ice_init_pkg(). The caller is free to immediately destroy the original
- * package buffer, as the new copy will be managed by this function and
- * related routines.
- */
-enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
-{
- enum ice_status status;
- u8 *buf_copy;
-
- if (!buf || !len)
- return ICE_ERR_PARAM;
-
- buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
-
- status = ice_init_pkg(hw, buf_copy, len);
- if (status) {
- /* Free the copy, since we failed to initialize the package */
- ice_free(hw, buf_copy);
- } else {
- /* Track the copied pkg so we can free it later */
- hw->pkg_copy = buf_copy;
- hw->pkg_size = len;
- }
-
- return status;
-}
-
-/**
- * ice_pkg_buf_alloc
- * @hw: pointer to the HW structure
- *
- * Allocates a package buffer and returns a pointer to the buffer header.
- * Note: all package contents must be in Little Endian form.
- */
-static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
-{
- struct ice_buf_build *bld;
- struct ice_buf_hdr *buf;
-
- bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
- if (!bld)
- return NULL;
-
- buf = (struct ice_buf_hdr *)bld;
- buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
- section_entry));
- return bld;
-}
-
-/**
- * ice_get_sw_prof_type - determine switch profile type
- * @hw: pointer to the HW structure
- * @fv: pointer to the switch field vector
- */
-static enum ice_prof_type
-ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
-{
- u16 i;
- bool valid_prof = false;
-
- for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
- if (fv->ew[i].off != ICE_NAN_OFFSET)
- valid_prof = true;
-
- /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
- if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
- fv->ew[i].off == ICE_VNI_OFFSET)
- return ICE_PROF_TUN_UDP;
-
- /* GRE tunnel will have GRE protocol */
- if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
- return ICE_PROF_TUN_GRE;
-
- /* PPPOE tunnel will have PPPOE protocol */
- if (fv->ew[i].prot_id == (u8)ICE_PROT_PPPOE)
- return ICE_PROF_TUN_PPPOE;
- }
-
- return valid_prof ? ICE_PROF_NON_TUN : ICE_PROF_INVALID;
-}
-
-/**
- * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
- * @hw: pointer to hardware structure
- * @req_profs: type of profiles requested
- * @bm: pointer to memory for returning the bitmap of field vectors
- */
-void
-ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
- ice_bitmap_t *bm)
-{
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- struct ice_fv *fv;
-
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
- ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
- ice_seg = hw->seg;
- do {
- enum ice_prof_type prof_type;
- u32 offset;
-
- fv = (struct ice_fv *)
- ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &offset, ice_sw_fv_handler);
- ice_seg = NULL;
-
- if (fv) {
- /* Determine field vector type */
- prof_type = ice_get_sw_prof_type(hw, fv);
-
- if (req_profs & prof_type)
- ice_set_bit((u16)offset, bm);
- }
- } while (fv);
-}
-
-/**
- * ice_get_sw_fv_list
+ * ice_add_tunnel_hint
* @hw: pointer to the HW structure
- * @prot_ids: field vector to search for with a given protocol ID
- * @ids_cnt: lookup/protocol count
- * @bm: bitmap of field vectors to consider
- * @fv_list: Head of a list
- *
- * Finds all the field vector entries from switch block that contain
- * a given protocol ID and returns a list of structures of type
- * "ice_sw_fv_list_entry". Every structure in the list has a field vector
- * definition and profile ID information
- * NOTE: The caller of the function is responsible for freeing the memory
- * allocated for every list entry.
+ * @label_name: label text
+ * @val: value of the tunnel port boost entry
*/
-enum ice_status
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
- ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
+void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
{
- struct ice_sw_fv_list_entry *fvl;
- struct ice_sw_fv_list_entry *tmp;
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- struct ice_fv *fv;
- u32 offset;
-
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
-
- if (!ids_cnt || !hw->seg)
- return ICE_ERR_PARAM;
-
- ice_seg = hw->seg;
- do {
+ if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
u16 i;
- fv = (struct ice_fv *)
- ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &offset, ice_sw_fv_handler);
- if (!fv)
- break;
- ice_seg = NULL;
-
- /* If field vector is not in the bitmap list, then skip this
- * profile.
- */
- if (!ice_is_bit_set(bm, (u16)offset))
- continue;
+ for (i = 0; tnls[i].type != TNL_LAST; i++) {
+ size_t len = strlen(tnls[i].label_prefix);
- for (i = 0; i < ids_cnt; i++) {
- int j;
+ /* Look for matching label start, before continuing */
+ if (strncmp(label_name, tnls[i].label_prefix, len))
+ continue;
- /* This code assumes that if a switch field vector line
- * has a matching protocol, then this line will contain
- * the entries necessary to represent every field in
- * that protocol header.
+ /* Make sure this label matches our PF. Note that the PF
+ * character ('0' - '7') will be located where our
+ * prefix string's null terminator is located.
*/
- for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv->ew[j].prot_id == prot_ids[i])
- break;
- if (j >= hw->blk[ICE_BLK_SW].es.fvw)
- break;
- if (i + 1 == ids_cnt) {
- fvl = (struct ice_sw_fv_list_entry *)
- ice_malloc(hw, sizeof(*fvl));
- if (!fvl)
- goto err;
- fvl->fv_ptr = fv;
- fvl->profile_id = offset;
- LIST_ADD(&fvl->list_entry, fv_list);
+ if ((label_name[len] - '0') == hw->pf_id) {
+ hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+ hw->tnl.tbl[hw->tnl.count].valid = false;
+ hw->tnl.tbl[hw->tnl.count].in_use = false;
+ hw->tnl.tbl[hw->tnl.count].marked = false;
+ hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+ hw->tnl.tbl[hw->tnl.count].port = 0;
+ hw->tnl.count++;
break;
}
}
- } while (fv);
- if (LIST_EMPTY(fv_list))
- return ICE_ERR_CFG;
- return ICE_SUCCESS;
-
-err:
- LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
- list_entry) {
- LIST_DEL(&fvl->list_entry);
- ice_free(hw, fvl);
}
-
- return ICE_ERR_NO_MEMORY;
}
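/* Editor's sketch, not part of the patch: how ice_add_tunnel_hint() consumes a
 * label from a boost TCAM label section. The label string, boost address and
 * the "TNL_VXLAN_PF" prefix are assumed here for illustration; the real
 * prefixes come from the driver's tnls[] scan table.
 */
static void example_tunnel_hint(struct ice_hw *hw)
{
	char label_name[] = "TNL_VXLAN_PF0";	/* hypothetical package label */
	u16 boost_addr = 0x1234;		/* hypothetical boost TCAM address */

	/* For pf_id 0 the '0' following the prefix matches, so the entry is
	 * appended to hw->tnl.tbl[] with its boost address and left unused
	 * until a tunnel port is actually created.
	 */
	ice_add_tunnel_hint(hw, label_name, boost_addr);
}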
/**
- * ice_init_prof_result_bm - Initialize the profile result index bitmap
- * @hw: pointer to hardware structure
+ * ice_add_dvm_hint
+ * @hw: pointer to the HW structure
+ * @val: value of the boost entry
+ * @enable: true if the entry needs to be enabled, or false if it needs to be disabled
*/
-void ice_init_prof_result_bm(struct ice_hw *hw)
+void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
{
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- struct ice_fv *fv;
-
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
-
- if (!hw->seg)
- return;
-
- ice_seg = hw->seg;
- do {
- u32 off;
- u16 i;
-
- fv = (struct ice_fv *)
- ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &off, ice_sw_fv_handler);
- ice_seg = NULL;
- if (!fv)
- break;
+ if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
+ hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
+ hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
+ hw->dvm_upd.count++;
+ }
+}
- ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
- ICE_MAX_FV_WORDS);
+/* Key creation */
- /* Determine empty field vector indices, these can be
- * used for recipe results. Skip index 0, since it is
- * always used for Switch ID.
- */
- for (i = 1; i < ICE_MAX_FV_WORDS; i++)
- if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
- fv->ew[i].off == ICE_FV_OFFSET_INVAL)
- ice_set_bit(i,
- hw->switch_info->prof_res_bm[off]);
- } while (fv);
-}
+#define ICE_DC_KEY 0x1 /* don't care */
+#define ICE_DC_KEYINV 0x1
+#define ICE_NM_KEY 0x0 /* never match */
+#define ICE_NM_KEYINV 0x0
+#define ICE_0_KEY 0x1 /* match 0 */
+#define ICE_0_KEYINV 0x0
+#define ICE_1_KEY 0x0 /* match 1 */
+#define ICE_1_KEYINV 0x1
/**
- * ice_pkg_buf_free
- * @hw: pointer to the HW structure
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * ice_gen_key_word - generate 16-bits of a key/mask word
+ * @val: the value
+ * @valid: valid bits mask (change only the valid bits)
+ * @dont_care: don't care mask
+ * @nvr_mtch: never match mask
+ * @key: pointer to an array where the resulting key portion will be stored
+ * @key_inv: pointer to an array where the resulting key invert portion will be stored
*
- * Frees a package buffer
- */
-void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
-{
- ice_free(hw, bld);
-}
-
-/**
- * ice_pkg_buf_reserve_section
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- * @count: the number of sections to reserve
+ * This function generates 16 bits from an 8-bit value, an 8-bit don't care mask
+ * and an 8-bit never match mask. The 16 bits of output are divided into 8 bits
+ * of key and 8 bits of key invert.
+ *
+ * '0' = b01, always match a 0 bit
+ * '1' = b10, always match a 1 bit
+ * '?' = b11, don't care bit (always matches)
+ * '~' = b00, never match bit
*
- * Reserves one or more section table entries in a package buffer. This routine
- * can be called multiple times as long as they are made before calling
- * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
- * is called once, the number of sections that can be allocated will not be able
- * to be increased; not using all reserved sections is fine, but this will
- * result in some wasted space in the buffer.
- * Note: all package contents must be in Little Endian form.
+ * Input:
+ * val: b0 1 0 1 0 1
+ * dont_care: b0 0 1 1 0 0
+ * never_mtch: b0 0 0 0 1 1
+ * ------------------------------
+ * Result: key: b01 10 11 11 00 00
*/
static enum ice_status
-ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
+ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
+ u8 *key_inv)
{
- struct ice_buf_hdr *buf;
- u16 section_count;
- u16 data_end;
+ u8 in_key = *key, in_key_inv = *key_inv;
+ u8 i;
- if (!bld)
- return ICE_ERR_PARAM;
+ /* 'dont_care' and 'nvr_mtch' masks cannot overlap */
+ if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
+ return ICE_ERR_CFG;
- buf = (struct ice_buf_hdr *)&bld->buf;
+ *key = 0;
+ *key_inv = 0;
- /* already an active section, can't increase table size */
- section_count = LE16_TO_CPU(buf->section_count);
- if (section_count > 0)
- return ICE_ERR_CFG;
+ /* encode the 8 bits into 8-bit key and 8-bit key invert */
+ for (i = 0; i < 8; i++) {
+ *key >>= 1;
+ *key_inv >>= 1;
- if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
- return ICE_ERR_CFG;
- bld->reserved_section_table_entries += count;
+ if (!(valid & 0x1)) { /* change only valid bits */
+ *key |= (in_key & 0x1) << 7;
+ *key_inv |= (in_key_inv & 0x1) << 7;
+ } else if (dont_care & 0x1) { /* don't care bit */
+ *key |= ICE_DC_KEY << 7;
+ *key_inv |= ICE_DC_KEYINV << 7;
+ } else if (nvr_mtch & 0x1) { /* never match bit */
+ *key |= ICE_NM_KEY << 7;
+ *key_inv |= ICE_NM_KEYINV << 7;
+ } else if (val & 0x01) { /* exact 1 match */
+ *key |= ICE_1_KEY << 7;
+ *key_inv |= ICE_1_KEYINV << 7;
+ } else { /* exact 0 match */
+ *key |= ICE_0_KEY << 7;
+ *key_inv |= ICE_0_KEYINV << 7;
+ }
- data_end = LE16_TO_CPU(buf->data_end) +
- FLEX_ARRAY_SIZE(buf, section_entry, count);
- buf->data_end = CPU_TO_LE16(data_end);
+ dont_care >>= 1;
+ nvr_mtch >>= 1;
+ valid >>= 1;
+ val >>= 1;
+ in_key >>= 1;
+ in_key_inv >>= 1;
+ }
return ICE_SUCCESS;
}
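/* Worked example (editor's sketch, not part of the patch), inside a caller:
 * with every bit valid and no don't-care or never-match bits the encoding
 * degenerates to key = ~val and key_inv = val, because a 1 bit is stored as
 * key 0/invert 1 and a 0 bit as key 1/invert 0.
 */
u8 key = 0, key_inv = 0;

if (!ice_gen_key_word(0x03, 0xff, 0, 0, &key, &key_inv)) {
	/* key == 0xfc, key_inv == 0x03 */
}

/* Marking the low nibble as don't care instead sets both halves for those
 * bits: ice_gen_key_word(0x03, 0xff, 0x0f, 0, &key, &key_inv) yields
 * key == 0xff and key_inv == 0x0f.
 */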
/**
- * ice_pkg_buf_alloc_section
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- * @type: the section type value
- * @size: the size of the section to reserve (in bytes)
+ * ice_bits_max_set - determine if the number of bits set is within a maximum
+ * @mask: pointer to the byte array which is the mask
+ * @size: the number of bytes in the mask
+ * @max: the max number of set bits
*
- * Reserves memory in the buffer for a section's content and updates the
- * buffers' status accordingly. This routine returns a pointer to the first
- * byte of the section start within the buffer, which is used to fill in the
- * section contents.
- * Note: all package contents must be in Little Endian form.
+ * This function determines if there are at most 'max' number of bits set in an
+ * array. Returns true if the number of bits set is <= max, or false
+ * otherwise.
*/
-static void *
-ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
+static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
{
- struct ice_buf_hdr *buf;
- u16 sect_count;
- u16 data_end;
-
- if (!bld || !type || !size)
- return NULL;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
-
- /* check for enough space left in buffer */
- data_end = LE16_TO_CPU(buf->data_end);
-
- /* section start must align on 4 byte boundary */
- data_end = ICE_ALIGN(data_end, 4);
-
- if ((data_end + size) > ICE_MAX_S_DATA_END)
- return NULL;
-
- /* check for more available section table entries */
- sect_count = LE16_TO_CPU(buf->section_count);
- if (sect_count < bld->reserved_section_table_entries) {
- void *section_ptr = ((u8 *)buf) + data_end;
+ u16 count = 0;
+ u16 i;
- buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
- buf->section_entry[sect_count].size = CPU_TO_LE16(size);
- buf->section_entry[sect_count].type = CPU_TO_LE32(type);
+ /* check each byte */
+ for (i = 0; i < size; i++) {
+ /* if 0, go to next byte */
+ if (!mask[i])
+ continue;
- data_end += size;
- buf->data_end = CPU_TO_LE16(data_end);
+ /* We know there is at least one set bit in this byte because of
+ * the above check; if we already have found 'max' number of
+ * bits set, then we can return failure now.
+ */
+ if (count == max)
+ return false;
- buf->section_count = CPU_TO_LE16(sect_count + 1);
- return section_ptr;
+ /* count the bits in this byte, checking threshold */
+ count += ice_hweight8(mask[i]);
+ if (count > max)
+ return false;
}
- /* no free section table entries */
- return NULL;
+ return true;
}
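/* Editor's sketch, not part of the patch: ice_bits_max_set() counts set bits
 * across the whole byte array. For example, a 3-byte mask { 0x00, 0x10, 0x00 }
 * has one bit set and passes max = 1 (returns true), while { 0x03, 0x00, 0x00 }
 * has two bits set and fails (returns false). This is what enforces the
 * single never-match bit limit used by ice_set_key() below.
 */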
/**
- * ice_pkg_buf_alloc_single_section
- * @hw: pointer to the HW structure
- * @type: the section type value
- * @size: the size of the section to reserve (in bytes)
- * @section: returns pointer to the section
+ * ice_set_key - generate a variable sized key with multiples of 16-bits
+ * @key: pointer to where the key will be stored
+ * @size: the size of the complete key in bytes (must be even)
+ * @val: array of 8-bit values that makes up the value portion of the key
+ * @upd: array of 8-bit masks that determine what key portion to update
+ * @dc: array of 8-bit masks that make up the don't care mask
+ * @nm: array of 8-bit masks that make up the never match mask
+ * @off: the offset of the first byte in the key to update
+ * @len: the number of bytes in the key update
*
- * Allocates a package buffer with a single section.
- * Note: all package contents must be in Little Endian form.
+ * This function generates a key from a value, a don't care mask and a never
+ * match mask.
+ * upd, dc, and nm are optional parameters, and can be NULL:
+ * upd == NULL --> upd mask is all 1's (update all bits)
+ * dc == NULL --> dc mask is all 0's (no don't care bits)
+ * nm == NULL --> nm mask is all 0's (no never match bits)
*/
-struct ice_buf_build *
-ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
- void **section)
+enum ice_status
+ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
+ u16 len)
{
- struct ice_buf_build *buf;
-
- if (!section)
- return NULL;
-
- buf = ice_pkg_buf_alloc(hw);
- if (!buf)
- return NULL;
-
- if (ice_pkg_buf_reserve_section(buf, 1))
- goto ice_pkg_buf_alloc_single_section_err;
-
- *section = ice_pkg_buf_alloc_section(buf, type, size);
- if (!*section)
- goto ice_pkg_buf_alloc_single_section_err;
-
- return buf;
-
-ice_pkg_buf_alloc_single_section_err:
- ice_pkg_buf_free(hw, buf);
- return NULL;
-}
+ u16 half_size;
+ u16 i;
-/**
- * ice_pkg_buf_get_active_sections
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Returns the number of active sections. Before using the package buffer
- * in an update package command, the caller should make sure that there is at
- * least one active section - otherwise, the buffer is not legal and should
- * not be used.
- * Note: all package contents must be in Little Endian form.
- */
-static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
-{
- struct ice_buf_hdr *buf;
+ /* size must be a multiple of 2 bytes. */
+ if (size % 2)
+ return ICE_ERR_CFG;
+ half_size = size / 2;
- if (!bld)
- return 0;
+ if (off + len > half_size)
+ return ICE_ERR_CFG;
- buf = (struct ice_buf_hdr *)&bld->buf;
- return LE16_TO_CPU(buf->section_count);
-}
+ /* Make sure at most one bit is set in the never match mask. Having more
+ * than one never match mask bit set will cause HW to consume excessive
+ * power otherwise; this is a power management efficiency check.
+ */
+#define ICE_NVR_MTCH_BITS_MAX 1
+ if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
+ return ICE_ERR_CFG;
-/**
- * ice_pkg_buf
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Return a pointer to the buffer's header
- */
-struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
-{
- if (!bld)
- return NULL;
+ for (i = 0; i < len; i++)
+ if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
+ dc ? dc[i] : 0, nm ? nm[i] : 0,
+ key + off + i, key + half_size + off + i))
+ return ICE_ERR_CFG;
- return &bld->buf;
+ return ICE_SUCCESS;
}
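/* Usage sketch (editor's note, not part of the patch), inside a caller: the
 * destination buffer carries the key bytes in its first half and the
 * key-invert bytes in its second half, so matching 2 bytes needs a 4-byte
 * buffer. The values below are made up for illustration.
 */
u8 key[4] = { 0 };		/* 2 key bytes followed by 2 key-invert bytes */
u8 val[2] = { 0x12, 0x00 };	/* exact-match 0x12 in the first byte */
u8 dc[2] = { 0x00, 0xff };	/* second byte is entirely don't care */

if (ice_set_key(key, sizeof(key), val, NULL, dc, NULL, 0, sizeof(val)))
	/* ICE_ERR_CFG: odd size, bad offset/length or too many never-match bits */;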
/**
@@ -3956,6 +2132,18 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
}
}
+/**
+ * ice_init_flow_profs - init flow profile locks and list heads
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static
+void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
+{
+ ice_init_lock(&hw->fl_profs_locks[blk_idx]);
+ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
+}
+
/**
* ice_fill_blk_tbls - Read package context for tables
* @hw: pointer to the hardware structure
@@ -4098,17 +2286,6 @@ void ice_free_hw_tbls(struct ice_hw *hw)
ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
}
-/**
- * ice_init_flow_profs - init flow profile locks and list heads
- * @hw: pointer to the hardware structure
- * @blk_idx: HW block index
- */
-static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
-{
- ice_init_lock(&hw->fl_profs_locks[blk_idx]);
- INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
-}
-
/**
* ice_clear_hw_tbls - clear HW tables and flow profiles
* @hw: pointer to the hardware structure
diff --git a/drivers/net/ice/base/ice_flex_pipe.h b/drivers/net/ice/base/ice_flex_pipe.h
index ab897de4f3..aab765e68f 100644
--- a/drivers/net/ice/base/ice_flex_pipe.h
+++ b/drivers/net/ice/base/ice_flex_pipe.h
@@ -7,23 +7,6 @@
#include "ice_type.h"
-/* Package minimal version supported */
-#define ICE_PKG_SUPP_VER_MAJ 1
-#define ICE_PKG_SUPP_VER_MNR 3
-
-/* Package format version */
-#define ICE_PKG_FMT_VER_MAJ 1
-#define ICE_PKG_FMT_VER_MNR 0
-#define ICE_PKG_FMT_VER_UPD 0
-#define ICE_PKG_FMT_VER_DFT 0
-
-#define ICE_PKG_CNT 4
-
-enum ice_status
-ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
-enum ice_status
-ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
-void ice_release_change_lock(struct ice_hw *hw);
enum ice_status
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u8 fv_idx,
u8 *prot, u16 *off);
@@ -36,12 +19,6 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
void
ice_init_prof_result_bm(struct ice_hw *hw);
enum ice_status
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
- ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
-enum ice_status
-ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
-u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
-enum ice_status
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd);
bool
@@ -79,31 +56,31 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
enum ice_status
ice_flow_assoc_hw_prof(struct ice_hw *hw, enum ice_block blk,
u16 dest_vsi_handle, u16 fdir_vsi_handle, int id);
-enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
-enum ice_status
-ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
-void ice_free_seg(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
enum ice_status
ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
-struct ice_buf_build *
-ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
- void **section);
-struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
-void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
enum ice_status
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
u16 len);
-void *
-ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
- u32 sect_type, u32 *offset,
- void *(*handler)(u32 sect_type, void *section,
- u32 index, u32 *offset));
-void *
-ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
- u32 sect_type);
+
+void ice_fill_blk_tbls(struct ice_hw *hw);
+
+/* To support tunneling entries by PF, the package will append the PF number to
+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
+ */
+#define ICE_TNL_PRE "TNL_"
+/* For supporting double VLAN mode, it is necessary to enable or disable certain
+ * boost tcam entries. The metadata labels names that match the following
+ * prefixes will be saved to allow enabling double VLAN mode.
+ */
+#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
+#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
+
+void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val);
+void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable);
+
#endif /* _ICE_FLEX_PIPE_H_ */
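A minimal sketch of how the prefixes and hint helpers declared above could fit
together when the relocated DDP code walks a boost TCAM label section (the
caller below is assumed for illustration; the actual scan lives in the new
ice_ddp.c):

	/* label_name and val come from enumerating a label section */
	if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
		ice_add_tunnel_hint(hw, label_name, val);
	else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
		ice_add_dvm_hint(hw, val, true);	/* enable in DVM */
	else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
		ice_add_dvm_hint(hw, val, false);	/* disable in DVM */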
diff --git a/drivers/net/ice/base/ice_flex_type.h b/drivers/net/ice/base/ice_flex_type.h
index 09a02fe9ac..d45653b637 100644
--- a/drivers/net/ice/base/ice_flex_type.h
+++ b/drivers/net/ice/base/ice_flex_type.h
@@ -14,6 +14,7 @@ struct ice_fv_word {
u16 off; /* Offset within the protocol header */
u8 resvrd;
};
+
#pragma pack()
#define ICE_MAX_NUM_PROFILES 256
@@ -23,251 +24,6 @@ struct ice_fv {
struct ice_fv_word ew[ICE_MAX_FV_WORDS];
};
-/* Package and segment headers and tables */
-struct ice_pkg_hdr {
- struct ice_pkg_ver pkg_format_ver;
- __le32 seg_count;
- __le32 seg_offset[STRUCT_HACK_VAR_LEN];
-};
-
-/* generic segment */
-struct ice_generic_seg_hdr {
-#define SEGMENT_TYPE_METADATA 0x00000001
-#define SEGMENT_TYPE_ICE_E810 0x00000010
- __le32 seg_type;
- struct ice_pkg_ver seg_format_ver;
- __le32 seg_size;
- char seg_id[ICE_PKG_NAME_SIZE];
-};
-
-/* ice specific segment */
-
-union ice_device_id {
- struct {
- __le16 device_id;
- __le16 vendor_id;
- } dev_vend_id;
- __le32 id;
-};
-
-struct ice_device_id_entry {
- union ice_device_id device;
- union ice_device_id sub_device;
-};
-
-struct ice_seg {
- struct ice_generic_seg_hdr hdr;
- __le32 device_table_count;
- struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN];
-};
-
-struct ice_nvm_table {
- __le32 table_count;
- __le32 vers[STRUCT_HACK_VAR_LEN];
-};
-
-struct ice_buf {
-#define ICE_PKG_BUF_SIZE 4096
- u8 buf[ICE_PKG_BUF_SIZE];
-};
-
-struct ice_buf_table {
- __le32 buf_count;
- struct ice_buf buf_array[STRUCT_HACK_VAR_LEN];
-};
-
-/* global metadata specific segment */
-struct ice_global_metadata_seg {
- struct ice_generic_seg_hdr hdr;
- struct ice_pkg_ver pkg_ver;
- __le32 rsvd;
- char pkg_name[ICE_PKG_NAME_SIZE];
-};
-
-#define ICE_MIN_S_OFF 12
-#define ICE_MAX_S_OFF 4095
-#define ICE_MIN_S_SZ 1
-#define ICE_MAX_S_SZ 4084
-
-/* section information */
-struct ice_section_entry {
- __le32 type;
- __le16 offset;
- __le16 size;
-};
-
-#define ICE_MIN_S_COUNT 1
-#define ICE_MAX_S_COUNT 511
-#define ICE_MIN_S_DATA_END 12
-#define ICE_MAX_S_DATA_END 4096
-
-#define ICE_METADATA_BUF 0x80000000
-
-struct ice_buf_hdr {
- __le16 section_count;
- __le16 data_end;
- struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN];
-};
-
-#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
- ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
- (ent_sz))
-
-/* ice package section IDs */
-#define ICE_SID_METADATA 1
-#define ICE_SID_XLT0_SW 10
-#define ICE_SID_XLT_KEY_BUILDER_SW 11
-#define ICE_SID_XLT1_SW 12
-#define ICE_SID_XLT2_SW 13
-#define ICE_SID_PROFID_TCAM_SW 14
-#define ICE_SID_PROFID_REDIR_SW 15
-#define ICE_SID_FLD_VEC_SW 16
-#define ICE_SID_CDID_KEY_BUILDER_SW 17
-#define ICE_SID_CDID_REDIR_SW 18
-
-#define ICE_SID_XLT0_ACL 20
-#define ICE_SID_XLT_KEY_BUILDER_ACL 21
-#define ICE_SID_XLT1_ACL 22
-#define ICE_SID_XLT2_ACL 23
-#define ICE_SID_PROFID_TCAM_ACL 24
-#define ICE_SID_PROFID_REDIR_ACL 25
-#define ICE_SID_FLD_VEC_ACL 26
-#define ICE_SID_CDID_KEY_BUILDER_ACL 27
-#define ICE_SID_CDID_REDIR_ACL 28
-
-#define ICE_SID_XLT0_FD 30
-#define ICE_SID_XLT_KEY_BUILDER_FD 31
-#define ICE_SID_XLT1_FD 32
-#define ICE_SID_XLT2_FD 33
-#define ICE_SID_PROFID_TCAM_FD 34
-#define ICE_SID_PROFID_REDIR_FD 35
-#define ICE_SID_FLD_VEC_FD 36
-#define ICE_SID_CDID_KEY_BUILDER_FD 37
-#define ICE_SID_CDID_REDIR_FD 38
-
-#define ICE_SID_XLT0_RSS 40
-#define ICE_SID_XLT_KEY_BUILDER_RSS 41
-#define ICE_SID_XLT1_RSS 42
-#define ICE_SID_XLT2_RSS 43
-#define ICE_SID_PROFID_TCAM_RSS 44
-#define ICE_SID_PROFID_REDIR_RSS 45
-#define ICE_SID_FLD_VEC_RSS 46
-#define ICE_SID_CDID_KEY_BUILDER_RSS 47
-#define ICE_SID_CDID_REDIR_RSS 48
-
-#define ICE_SID_RXPARSER_CAM 50
-#define ICE_SID_RXPARSER_NOMATCH_CAM 51
-#define ICE_SID_RXPARSER_IMEM 52
-#define ICE_SID_RXPARSER_XLT0_BUILDER 53
-#define ICE_SID_RXPARSER_NODE_PTYPE 54
-#define ICE_SID_RXPARSER_MARKER_PTYPE 55
-#define ICE_SID_RXPARSER_BOOST_TCAM 56
-#define ICE_SID_RXPARSER_PROTO_GRP 57
-#define ICE_SID_RXPARSER_METADATA_INIT 58
-#define ICE_SID_RXPARSER_XLT0 59
-
-#define ICE_SID_TXPARSER_CAM 60
-#define ICE_SID_TXPARSER_NOMATCH_CAM 61
-#define ICE_SID_TXPARSER_IMEM 62
-#define ICE_SID_TXPARSER_XLT0_BUILDER 63
-#define ICE_SID_TXPARSER_NODE_PTYPE 64
-#define ICE_SID_TXPARSER_MARKER_PTYPE 65
-#define ICE_SID_TXPARSER_BOOST_TCAM 66
-#define ICE_SID_TXPARSER_PROTO_GRP 67
-#define ICE_SID_TXPARSER_METADATA_INIT 68
-#define ICE_SID_TXPARSER_XLT0 69
-
-#define ICE_SID_RXPARSER_INIT_REDIR 70
-#define ICE_SID_TXPARSER_INIT_REDIR 71
-#define ICE_SID_RXPARSER_MARKER_GRP 72
-#define ICE_SID_TXPARSER_MARKER_GRP 73
-#define ICE_SID_RXPARSER_LAST_PROTO 74
-#define ICE_SID_TXPARSER_LAST_PROTO 75
-#define ICE_SID_RXPARSER_PG_SPILL 76
-#define ICE_SID_TXPARSER_PG_SPILL 77
-#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
-#define ICE_SID_TXPARSER_NOMATCH_SPILL 79
-
-#define ICE_SID_XLT0_PE 80
-#define ICE_SID_XLT_KEY_BUILDER_PE 81
-#define ICE_SID_XLT1_PE 82
-#define ICE_SID_XLT2_PE 83
-#define ICE_SID_PROFID_TCAM_PE 84
-#define ICE_SID_PROFID_REDIR_PE 85
-#define ICE_SID_FLD_VEC_PE 86
-#define ICE_SID_CDID_KEY_BUILDER_PE 87
-#define ICE_SID_CDID_REDIR_PE 88
-
-#define ICE_SID_RXPARSER_FLAG_REDIR 97
-
-/* Label Metadata section IDs */
-#define ICE_SID_LBL_FIRST 0x80000010
-#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010
-#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011
-#define ICE_SID_LBL_RESERVED_12 0x80000012
-#define ICE_SID_LBL_RESERVED_13 0x80000013
-#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014
-#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015
-#define ICE_SID_LBL_PTYPE 0x80000016
-#define ICE_SID_LBL_PROTOCOL_ID 0x80000017
-#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
-#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019
-#define ICE_SID_LBL_RXPARSER_PG 0x8000001A
-#define ICE_SID_LBL_TXPARSER_PG 0x8000001B
-#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C
-#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D
-#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E
-#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F
-#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020
-#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021
-#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022
-#define ICE_SID_LBL_FLAG 0x80000023
-#define ICE_SID_LBL_REG 0x80000024
-#define ICE_SID_LBL_SW_PTG 0x80000025
-#define ICE_SID_LBL_ACL_PTG 0x80000026
-#define ICE_SID_LBL_PE_PTG 0x80000027
-#define ICE_SID_LBL_RSS_PTG 0x80000028
-#define ICE_SID_LBL_FD_PTG 0x80000029
-#define ICE_SID_LBL_SW_VSIG 0x8000002A
-#define ICE_SID_LBL_ACL_VSIG 0x8000002B
-#define ICE_SID_LBL_PE_VSIG 0x8000002C
-#define ICE_SID_LBL_RSS_VSIG 0x8000002D
-#define ICE_SID_LBL_FD_VSIG 0x8000002E
-#define ICE_SID_LBL_PTYPE_META 0x8000002F
-#define ICE_SID_LBL_SW_PROFID 0x80000030
-#define ICE_SID_LBL_ACL_PROFID 0x80000031
-#define ICE_SID_LBL_PE_PROFID 0x80000032
-#define ICE_SID_LBL_RSS_PROFID 0x80000033
-#define ICE_SID_LBL_FD_PROFID 0x80000034
-#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035
-#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036
-#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037
-#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038
-/* The following define MUST be updated to reflect the last label section ID */
-#define ICE_SID_LBL_LAST 0x80000038
-
-enum ice_block {
- ICE_BLK_SW = 0,
- ICE_BLK_ACL,
- ICE_BLK_FD,
- ICE_BLK_RSS,
- ICE_BLK_PE,
- ICE_BLK_COUNT
-};
-
-enum ice_sect {
- ICE_XLT0 = 0,
- ICE_XLT_KB,
- ICE_XLT1,
- ICE_XLT2,
- ICE_PROF_TCAM,
- ICE_PROF_REDIR,
- ICE_VEC_TBL,
- ICE_CDID_KB,
- ICE_CDID_REDIR,
- ICE_SECT_COUNT
-};
-
/* Packet Type (PTYPE) values */
#define ICE_PTYPE_MAC_PAY 1
#define ICE_MAC_PTP 2
@@ -662,25 +418,6 @@ struct ice_boost_tcam_section {
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
-/* package Marker PType TCAM entry */
-struct ice_marker_ptype_tcam_entry {
-#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
- __le16 addr;
- __le16 ptype;
- u8 keys[20];
-};
-
-struct ice_marker_ptype_tcam_section {
- __le16 count;
- __le16 reserved;
- struct ice_marker_ptype_tcam_entry tcam[STRUCT_HACK_VAR_LEN];
-};
-
-#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
- ice_struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \
- sizeof(struct ice_marker_ptype_tcam_entry), \
- sizeof(struct ice_marker_ptype_tcam_entry))
-
struct ice_xlt1_section {
__le16 count;
__le16 offset;
@@ -699,27 +436,6 @@ struct ice_prof_redir_section {
u8 redir_value[STRUCT_HACK_VAR_LEN];
};
-/* package buffer building */
-
-struct ice_buf_build {
- struct ice_buf buf;
- u16 reserved_section_table_entries;
-};
-
-struct ice_pkg_enum {
- struct ice_buf_table *buf_table;
- u32 buf_idx;
-
- u32 type;
- struct ice_buf_hdr *buf;
- u32 sect_idx;
- void *sect;
- u32 sect_type;
-
- u32 entry_idx;
- void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
-};
-
/* Tunnel enabling */
enum ice_tunnel_type {
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index 513623a0a4..ad61dde397 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -7417,37 +7417,18 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
* @hw: pointer to hardware structure
* @lkups: lookup elements or match criteria for the advanced recipe, one
* structure per protocol header
- * @lkups_cnt: number of protocols
* @bm: bitmap of field vectors to consider
* @fv_list: pointer to a list that holds the returned field vectors
*/
static enum ice_status
-ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ice_get_fv(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
{
- enum ice_status status;
- u8 *prot_ids;
- u16 i;
-
- if (!lkups_cnt)
+ if (!lkups->n_val_words)
return ICE_SUCCESS;
- prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
- if (!prot_ids)
- return ICE_ERR_NO_MEMORY;
-
- for (i = 0; i < lkups_cnt; i++)
- if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
- status = ICE_ERR_CFG;
- goto free_mem;
- }
-
/* Find field vectors that include all specified protocol types */
- status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
-
-free_mem:
- ice_free(hw, prot_ids);
- return status;
+ return ice_get_sw_fv_list(hw, lkups, bm, fv_list);
}
/**
@@ -7840,16 +7821,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*/
ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
- /* If it is a packet to match any, add a lookup element to match direction
- * flag of source interface.
- */
- if (rinfo->tun_type == ICE_SW_TUN_AND_NON_TUN &&
- lkups_cnt < ICE_MAX_CHAIN_WORDS) {
- lkups[lkups_cnt].type = ICE_FLG_DIR;
- lkups_cnt++;
- }
-
- status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
+ status = ice_get_fv(hw, lkup_exts, fv_bitmap, &rm->fv_list);
if (status)
goto err_unroll;
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index a17accff19..d94fdcda67 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -5,54 +5,15 @@
#ifndef _ICE_TYPE_H_
#define _ICE_TYPE_H_
-#define ETH_ALEN 6
-
-#define ETH_HEADER_LEN 14
-
-#define BIT(a) (1UL << (a))
-#define BIT_ULL(a) (1ULL << (a))
-
-#define BITS_PER_BYTE 8
-
-#define _FORCE_
-
-#define ICE_BYTES_PER_WORD 2
-#define ICE_BYTES_PER_DWORD 4
-#define ICE_MAX_TRAFFIC_CLASS 8
-
-/**
- * ROUND_UP - round up to next arbitrary multiple (not a power of 2)
- * @a: value to round up
- * @b: arbitrary multiple
- *
- * Round up to the next multiple of the arbitrary b.
- * Note, when b is a power of 2 use ICE_ALIGN() instead.
- */
-#define ROUND_UP(a, b) ((b) * DIVIDE_AND_ROUND_UP((a), (b)))
-
-#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b))
-
-#define IS_ASCII(_ch) ((_ch) < 0x80)
-
-#define STRUCT_HACK_VAR_LEN
-/**
- * ice_struct_size - size of struct with C99 flexible array member
- * @ptr: pointer to structure
- * @field: flexible array member (last member of the structure)
- * @num: number of elements of that flexible array member
- */
-#define ice_struct_size(ptr, field, num) \
- (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
-
-#define FLEX_ARRAY_SIZE(_ptr, _mem, cnt) ((cnt) * sizeof(_ptr->_mem[0]))
-
+#include "ice_defs.h"
#include "ice_status.h"
#include "ice_hw_autogen.h"
#include "ice_devids.h"
#include "ice_osdep.h"
#include "ice_bitops.h" /* Must come before ice_controlq.h */
-#include "ice_controlq.h"
#include "ice_lan_tx_rx.h"
+#include "ice_ddp.h"
+#include "ice_controlq.h"
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
#include "ice_sbq_cmd.h"
@@ -191,11 +152,6 @@ enum ice_aq_res_ids {
#define ICE_CHANGE_LOCK_TIMEOUT 1000
#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
-enum ice_aq_res_access_type {
- ICE_RES_READ = 1,
- ICE_RES_WRITE
-};
-
struct ice_driver_ver {
u8 major_ver;
u8 minor_ver;
@@ -248,6 +204,7 @@ enum ice_mac_type {
ICE_MAC_UNKNOWN = 0,
ICE_MAC_E810,
ICE_MAC_GENERIC,
+ ICE_MAC_GENERIC_3K,
};
/* Media Types */
@@ -636,6 +593,7 @@ struct ice_hw_common_caps {
#define ICE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
bool ext_topo_dev_img_prog_en[ICE_EXT_TOPO_DEV_IMG_COUNT];
#define ICE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
+ bool tx_sched_topo_comp_mode_en;
};
/* IEEE 1588 TIME_SYNC specific info */
@@ -1247,7 +1205,9 @@ struct ice_hw {
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
u32 pkg_seg_id;
+ u32 pkg_sign_type;
u32 active_track_id;
+ u8 pkg_has_signing_seg:1;
u8 active_pkg_name[ICE_PKG_NAME_SIZE];
u8 active_pkg_in_nvm;
diff --git a/drivers/net/ice/base/ice_vlan_mode.c b/drivers/net/ice/base/ice_vlan_mode.c
index 29c6509fc5..d1003a5a89 100644
--- a/drivers/net/ice/base/ice_vlan_mode.c
+++ b/drivers/net/ice/base/ice_vlan_mode.c
@@ -4,6 +4,7 @@
#include "ice_common.h"
+#include "ice_ddp.h"
/**
* ice_pkg_get_supported_vlan_mode - chk if DDP supports Double VLAN mode (DVM)
* @hw: pointer to the HW struct
diff --git a/drivers/net/ice/base/meson.build b/drivers/net/ice/base/meson.build
index 3cf4ce05fa..41ed2d96c6 100644
--- a/drivers/net/ice/base/meson.build
+++ b/drivers/net/ice/base/meson.build
@@ -26,6 +26,7 @@ sources = [
'ice_flg_rd.c',
'ice_xlt_kb.c',
'ice_parser_rt.c',
+ 'ice_ddp.c',
]
error_cflags = [
--
2.31.1
* RE: [PATCH v2 20/70] net/ice/base: refactor DDP code
2022-08-15 7:31 ` [PATCH v2 20/70] net/ice/base: refactor DDP code Qi Zhang
@ 2022-08-15 6:44 ` Yang, Qiming
0 siblings, 0 replies; 149+ messages in thread
From: Yang, Qiming @ 2022-08-15 6:44 UTC (permalink / raw)
To: Zhang, Qi Z; +Cc: dev, Temerkhanov, Sergey, Drewek, Wojciech, Nowlin, Dan
> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: Monday, August 15, 2022 3:31 PM
> To: Yang, Qiming <qiming.yang@intel.com>
> Cc: dev@dpdk.org; Zhang, Qi Z <qi.z.zhang@intel.com>; Temerkhanov,
> Sergey <sergey.temerkhanov@intel.com>; Drewek, Wojciech
> <wojciech.drewek@intel.com>; Nowlin, Dan <dan.nowlin@intel.com>
> Subject: [PATCH v2 20/70] net/ice/base: refactor DDP code
>
> Move DDP related code into ice_ddp.c.
> Refactor status flow for DDP load.
> Also add support for DDP signature segments.
>
> Signed-off-by: Sergey Temerkhanov <sergey.temerkhanov@intel.com>
> Signed-off-by: Wojciech Drewek <wojciech.drewek@intel.com>
> Signed-off-by: Dan Nowlin <dan.nowlin@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> ---
Does this patch also cover some new adminqs for QoS?
> drivers/net/ice/base/ice_adminq_cmd.h | 32 +
> drivers/net/ice/base/ice_bitops.h | 5 +-
> drivers/net/ice/base/ice_ddp.c | 2475 +++++++++++++++++++++++++
> drivers/net/ice/base/ice_ddp.h | 466 +++++
> drivers/net/ice/base/ice_defs.h | 49 +
> drivers/net/ice/base/ice_flex_pipe.c | 2175 ++--------------------
> drivers/net/ice/base/ice_flex_pipe.h | 57 +-
> drivers/net/ice/base/ice_flex_type.h | 286 +--
> drivers/net/ice/base/ice_switch.c | 36 +-
> drivers/net/ice/base/ice_type.h | 54 +-
> drivers/net/ice/base/ice_vlan_mode.c | 1 +
> drivers/net/ice/base/meson.build | 1 +
> 12 files changed, 3233 insertions(+), 2404 deletions(-)
> create mode 100644 drivers/net/ice/base/ice_ddp.c
> create mode 100644 drivers/net/ice/base/ice_ddp.h
> create mode 100644 drivers/net/ice/base/ice_defs.h
>
> diff --git a/drivers/net/ice/base/ice_adminq_cmd.h
> b/drivers/net/ice/base/ice_adminq_cmd.h
> index 517af4b6ef..8f7e13096c 100644
> --- a/drivers/net/ice/base/ice_adminq_cmd.h
> +++ b/drivers/net/ice/base/ice_adminq_cmd.h
> @@ -9,10 +9,19 @@
> * descriptor format. It is shared between Firmware and Software.
> */
>
> +#include "ice_osdep.h"
> +#include "ice_defs.h"
> +#include "ice_bitops.h"
> +
> #define ICE_MAX_VSI 768
> #define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9
> #define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728
>
> +enum ice_aq_res_access_type {
> + ICE_RES_READ = 1,
> + ICE_RES_WRITE
> +};
> +
> struct ice_aqc_generic {
> __le32 param0;
> __le32 param1;
> @@ -1035,6 +1044,24 @@ struct ice_aqc_get_topo {
> __le32 addr_low;
> };
>
> +/* Get/Set Tx Topology (indirect 0x0418/0x0417) */
> +struct ice_aqc_get_set_tx_topo {
> + u8 set_flags;
> +#define ICE_AQC_TX_TOPO_FLAGS_CORRER BIT(0)
> +#define ICE_AQC_TX_TOPO_FLAGS_SRC_RAM BIT(1)
> +#define ICE_AQC_TX_TOPO_FLAGS_SET_PSM BIT(2)
> +#define ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW BIT(4)
> +#define ICE_AQC_TX_TOPO_FLAGS_ISSUED BIT(5)
> + u8 get_flags;
> +#define ICE_AQC_TX_TOPO_GET_NO_UPDATE 0
> +#define ICE_AQC_TX_TOPO_GET_PSM 1
> +#define ICE_AQC_TX_TOPO_GET_RAM 2
> + __le16 reserved1;
> + __le32 reserved2;
> + __le32 addr_high;
> + __le32 addr_low;
> +};
> +
> /* Update TSE (indirect 0x0403)
> * Get TSE (indirect 0x0404)
> * Add TSE (indirect 0x0401)
> @@ -3008,6 +3035,7 @@ struct ice_aq_desc {
> struct ice_aqc_clear_health_status clear_health_status;
> struct ice_aqc_prog_topo_dev_nvm prog_topo_dev_nvm;
> struct ice_aqc_read_topo_dev_nvm read_topo_dev_nvm;
> + struct ice_aqc_get_set_tx_topo get_set_tx_topo;
> } params;
> };
>
> @@ -3164,6 +3192,10 @@ enum ice_adminq_opc {
> ice_aqc_opc_query_node_to_root = 0x0413,
> ice_aqc_opc_cfg_l2_node_cgd = 0x0414,
> ice_aqc_opc_remove_rl_profiles = 0x0415,
> + ice_aqc_opc_set_tx_topo = 0x0417,
> + ice_aqc_opc_get_tx_topo = 0x0418,
> + ice_aqc_opc_cfg_node_attr = 0x0419,
> + ice_aqc_opc_query_node_attr = 0x041A,
>
> /* PHY commands */
> ice_aqc_opc_get_phy_caps = 0x0600,
> diff --git a/drivers/net/ice/base/ice_bitops.h
> b/drivers/net/ice/base/ice_bitops.h
> index 21ec2014e1..8060c103fa 100644
> --- a/drivers/net/ice/base/ice_bitops.h
> +++ b/drivers/net/ice/base/ice_bitops.h
> @@ -5,6 +5,9 @@
> #ifndef _ICE_BITOPS_H_
> #define _ICE_BITOPS_H_
>
> +#include "ice_defs.h"
> +#include "ice_osdep.h"
> +
> /* Define the size of the bitmap chunk */
> typedef u32 ice_bitmap_t;
>
> @@ -13,7 +16,7 @@ typedef u32 ice_bitmap_t;
> /* Determine which chunk a bit belongs in */
> #define BIT_CHUNK(nr) ((nr) / BITS_PER_CHUNK)
> /* How many chunks are required to store this many bits */
> -#define BITS_TO_CHUNKS(sz) DIVIDE_AND_ROUND_UP((sz),
> BITS_PER_CHUNK)
> +#define BITS_TO_CHUNKS(sz) (((sz) + BITS_PER_CHUNK - 1) /
> BITS_PER_CHUNK)
> /* Which bit inside a chunk this bit corresponds to */
> #define BIT_IN_CHUNK(nr) ((nr) % BITS_PER_CHUNK)
> /* How many bits are valid in the last chunk, assumes nr > 0 */
> diff --git a/drivers/net/ice/base/ice_ddp.c b/drivers/net/ice/base/ice_ddp.c
> new file mode 100644
> index 0000000000..d1cae48047
> --- /dev/null
> +++ b/drivers/net/ice/base/ice_ddp.c
> @@ -0,0 +1,2475 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2001-2022 Intel Corporation
> + */
> +
> +#include "ice_ddp.h"
> +#include "ice_type.h"
> +#include "ice_common.h"
> +#include "ice_sched.h"
> +
> +/**
> + * ice_aq_download_pkg
> + * @hw: pointer to the hardware structure
> + * @pkg_buf: the package buffer to transfer
> + * @buf_size: the size of the package buffer
> + * @last_buf: last buffer indicator
> + * @error_offset: returns error offset
> + * @error_info: returns error information
> + * @cd: pointer to command details structure or NULL
> + *
> + * Download Package (0x0C40)
> + */
> +static enum ice_status
> +ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
> + u16 buf_size, bool last_buf, u32 *error_offset,
> + u32 *error_info, struct ice_sq_cd *cd)
> +{
> + struct ice_aqc_download_pkg *cmd;
> + struct ice_aq_desc desc;
> + enum ice_status status;
> +
> + if (error_offset)
> + *error_offset = 0;
> + if (error_info)
> + *error_info = 0;
> +
> + cmd = &desc.params.download_pkg;
> + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
> + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
> +
> + if (last_buf)
> + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
> +
> + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
> + if (status == ICE_ERR_AQ_ERROR) {
> + /* Read error from buffer only when the FW returned an error */
> + struct ice_aqc_download_pkg_resp *resp;
> +
> + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
> + if (error_offset)
> + *error_offset = LE32_TO_CPU(resp->error_offset);
> + if (error_info)
> + *error_info = LE32_TO_CPU(resp->error_info);
> + }
> +
> + return status;
> +}
> +
> +/**
> + * ice_aq_upload_section
> + * @hw: pointer to the hardware structure
> + * @pkg_buf: the package buffer which will receive the section
> + * @buf_size: the size of the package buffer
> + * @cd: pointer to command details structure or NULL
> + *
> + * Upload Section (0x0C41)
> + */
> +enum ice_status
> +ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
> + u16 buf_size, struct ice_sq_cd *cd)
> +{
> + struct ice_aq_desc desc;
> +
> + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
> + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
> +
> + return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
> +}
> +
> +/**
> + * ice_aq_update_pkg
> + * @hw: pointer to the hardware structure
> + * @pkg_buf: the package cmd buffer
> + * @buf_size: the size of the package cmd buffer
> + * @last_buf: last buffer indicator
> + * @error_offset: returns error offset
> + * @error_info: returns error information
> + * @cd: pointer to command details structure or NULL
> + *
> + * Update Package (0x0C42)
> + */
> +static enum ice_status
> +ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
> + bool last_buf, u32 *error_offset, u32 *error_info,
> + struct ice_sq_cd *cd)
> +{
> + struct ice_aqc_download_pkg *cmd;
> + struct ice_aq_desc desc;
> + enum ice_status status;
> +
> + if (error_offset)
> + *error_offset = 0;
> + if (error_info)
> + *error_info = 0;
> +
> + cmd = &desc.params.download_pkg;
> + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
> + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
> +
> + if (last_buf)
> + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
> +
> + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
> + if (status == ICE_ERR_AQ_ERROR) {
> + /* Read error from buffer only when the FW returned an error */
> + struct ice_aqc_download_pkg_resp *resp;
> +
> + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
> + if (error_offset)
> + *error_offset = LE32_TO_CPU(resp->error_offset);
> + if (error_info)
> + *error_info = LE32_TO_CPU(resp->error_info);
> + }
> +
> + return status;
> +}
> +
> +/**
> + * ice_find_seg_in_pkg
> + * @hw: pointer to the hardware structure
> + * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
> + * @pkg_hdr: pointer to the package header to be searched
> + *
> + * This function searches a package file for a particular segment type. On
> + * success it returns a pointer to the segment header, otherwise it will
> + * return NULL.
> + */
> +struct ice_generic_seg_hdr *
> +ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
> + struct ice_pkg_hdr *pkg_hdr)
> +{
> + u32 i;
> +
> + ice_debug(hw, ICE_DBG_PKG, "Package format
> version: %d.%d.%d.%d\n",
> + pkg_hdr->pkg_format_ver.major, pkg_hdr-
> >pkg_format_ver.minor,
> + pkg_hdr->pkg_format_ver.update,
> + pkg_hdr->pkg_format_ver.draft);
> +
> + /* Search all package segments for the requested segment type */
> + for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
> + struct ice_generic_seg_hdr *seg;
> +
> + seg = (struct ice_generic_seg_hdr *)
> + ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
> +
> + if (LE32_TO_CPU(seg->seg_type) == seg_type)
> + return seg;
> + }
> +
> + return NULL;
> +}
> +
> +/**
> + * ice_get_pkg_seg_by_idx
> + * @pkg_hdr: pointer to the package header to be searched
> + * @idx: index of segment
> + */
> +static struct ice_generic_seg_hdr *
> +ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
> +{
> + struct ice_generic_seg_hdr *seg = NULL;
> +
> + if (idx < LE32_TO_CPU(pkg_hdr->seg_count))
> + seg = (struct ice_generic_seg_hdr *)
> + ((u8 *)pkg_hdr +
> + LE32_TO_CPU(pkg_hdr->seg_offset[idx]));
> +
> + return seg;
> +}
> +
> +/**
> + * ice_is_signing_seg_at_idx - determine if segment is a signing segment
> + * @pkg_hdr: pointer to package header
> + * @idx: segment index
> + */
> +static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
> +{
> + struct ice_generic_seg_hdr *seg;
> + bool retval = false;
> +
> + seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx);
> + if (seg)
> + retval = LE32_TO_CPU(seg->seg_type) == SEGMENT_TYPE_SIGNING;
> +
> + return retval;
> +}
> +
> +/**
> + * ice_is_signing_seg_type_at_idx
> + * @pkg_hdr: pointer to package header
> + * @idx: segment index
> + * @seg_id: segment id that is expected
> + * @sign_type: signing type
> + *
> + * Determine if a segment is a signing segment of the correct type
> + */
> +static bool
> +ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
> + u32 seg_id, u32 sign_type)
> +{
> + bool result = false;
> +
> + if (ice_is_signing_seg_at_idx(pkg_hdr, idx)) {
> + struct ice_sign_seg *seg;
> +
> + seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr,
> + idx);
> + if (seg && LE32_TO_CPU(seg->seg_id) == seg_id &&
> + LE32_TO_CPU(seg->sign_type) == sign_type)
> + result = true;
> + }
> +
> + return result;
> +}
> +
> +/**
> + * ice_update_pkg_no_lock
> + * @hw: pointer to the hardware structure
> + * @bufs: pointer to an array of buffers
> + * @count: the number of buffers in the array
> + */
> +enum ice_status
> +ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
> +{
> + enum ice_status status = ICE_SUCCESS;
> + u32 i;
> +
> + for (i = 0; i < count; i++) {
> + struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
> + bool last = ((i + 1) == count);
> + u32 offset, info;
> +
> + status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
> + last, &offset, &info, NULL);
> +
> + if (status) {
> + ice_debug(hw, ICE_DBG_PKG, "Update pkg failed:
> err %d off %d inf %d\n",
> + status, offset, info);
> + break;
> + }
> + }
> +
> + return status;
> +}
> +
> +/**
> + * ice_update_pkg
> + * @hw: pointer to the hardware structure
> + * @bufs: pointer to an array of buffers
> + * @count: the number of buffers in the array
> + *
> + * Obtains change lock and updates package.
> + */
> +enum ice_status
> +ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
> +{
> + enum ice_status status;
> +
> + status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
> + if (status)
> + return status;
> +
> + status = ice_update_pkg_no_lock(hw, bufs, count);
> +
> + ice_release_change_lock(hw);
> +
> + return status;
> +}
> +
> +static enum ice_ddp_state
> +ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
> +{
> + switch (aq_err) {
> + case ICE_AQ_RC_ENOSEC:
> + return ICE_DDP_PKG_NO_SEC_MANIFEST;
> + case ICE_AQ_RC_EBADSIG:
> + return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
> + case ICE_AQ_RC_ESVN:
> + return ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW;
> + case ICE_AQ_RC_EBADMAN:
> + return ICE_DDP_PKG_MANIFEST_INVALID;
> + case ICE_AQ_RC_EBADBUF:
> + return ICE_DDP_PKG_BUFFER_INVALID;
> + default:
> + return ICE_DDP_PKG_ERR;
> + }
> +}
> +
> +/**
> + * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
> + * @buf: pointer to buffer header
> + */
> +static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
> +{
> + bool metadata = false;
> +
> + if (LE32_TO_CPU(buf->section_entry[0].type) & ICE_METADATA_BUF)
> + metadata = true;
> +
> + return metadata;
> +}
> +
> +/**
> + * ice_is_last_download_buffer
> + * @buf: pointer to current buffer header
> + * @idx: index of the buffer in the current sequence
> + * @count: the buffer count in the current sequence
> + *
> + * Note: this routine should only be called if the buffer is not the last buffer
> + */
> +static bool
> +ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count)
> +{
> + bool last = ((idx + 1) == count);
> +
> + /* A set metadata flag in the next buffer will signal that the current
> + * buffer will be the last buffer downloaded
> + */
> + if (!last) {
> + struct ice_buf *next_buf = ((struct ice_buf *)buf) + 1;
> +
> + last = ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf);
> + }
> +
> + return last;
> +}
> +
> +/**
> + * ice_dwnld_cfg_bufs_no_lock
> + * @hw: pointer to the hardware structure
> + * @bufs: pointer to an array of buffers
> + * @start: buffer index of first buffer to download
> + * @count: the number of buffers to download
> + * @indicate_last: if true, then set last buffer flag on last buffer download
> + *
> + * Downloads package configuration buffers to the firmware. Metadata buffers
> + * are skipped, and the first metadata buffer found indicates that the rest
> + * of the buffers are all metadata buffers.
> + */
> +static enum ice_ddp_state
> +ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
> + u32 count, bool indicate_last)
> +{
> + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
> + struct ice_buf_hdr *bh;
> + enum ice_aq_err err;
> + u32 offset, info, i;
> +
> + if (!bufs || !count)
> + return ICE_DDP_PKG_ERR;
> +
> + /* If the first buffer's first section has its metadata bit set
> + * then there are no buffers to be downloaded, and the operation is
> + * considered a success.
> + */
> + bh = (struct ice_buf_hdr *)(bufs + start);
> + if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
> + return ICE_DDP_PKG_SUCCESS;
> +
> + for (i = 0; i < count; i++) {
> + enum ice_status status;
> + bool last = false;
> +
> + bh = (struct ice_buf_hdr *)(bufs + start + i);
> +
> + if (indicate_last)
> + last = ice_is_last_download_buffer(bh, i, count);
> +
> + status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
> + &offset, &info, NULL);
> +
> + /* Save AQ status from download package */
> + if (status) {
> + ice_debug(hw, ICE_DBG_PKG, "Pkg download failed:
> err %d off %d inf %d\n",
> + status, offset, info);
> + err = hw->adminq.sq_last_status;
> + state = ice_map_aq_err_to_ddp_state(err);
> + break;
> + }
> +
> + if (last)
> + break;
> + }
> +
> + return state;
> +}
> +
> +/**
> + * ice_aq_get_pkg_info_list
> + * @hw: pointer to the hardware structure
> + * @pkg_info: the buffer which will receive the information list
> + * @buf_size: the size of the pkg_info information buffer
> + * @cd: pointer to command details structure or NULL
> + *
> + * Get Package Info List (0x0C43)
> + */
> +static enum ice_status
> +ice_aq_get_pkg_info_list(struct ice_hw *hw,
> + struct ice_aqc_get_pkg_info_resp *pkg_info,
> + u16 buf_size, struct ice_sq_cd *cd)
> +{
> + struct ice_aq_desc desc;
> +
> + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
> +
> + return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
> +}
> +
> +/**
> + * ice_has_signing_seg - determine if package has a signing segment
> + * @hw: pointer to the hardware structure
> + * @pkg_hdr: pointer to the driver's package hdr
> + */
> +static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
> +{
> + struct ice_generic_seg_hdr *seg_hdr;
> +
> + seg_hdr = (struct ice_generic_seg_hdr *)
> + ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr);
> +
> + return seg_hdr ? true : false;
> +}
> +
> +/**
> + * ice_get_pkg_segment_id - get correct package segment id, based on device
> + * @mac_type: MAC type of the device
> + */
> +static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
> +{
> + u32 seg_id;
> +
> + switch (mac_type) {
> + case ICE_MAC_GENERIC:
> + case ICE_MAC_GENERIC_3K:
> + default:
> + seg_id = SEGMENT_TYPE_ICE_E810;
> + break;
> + }
> +
> + return seg_id;
> +}
> +
> +/**
> + * ice_get_pkg_sign_type - get package segment sign type, based on device
> + * @mac_type: MAC type of the device
> + */
> +static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
> +{
> + u32 sign_type;
> +
> + switch (mac_type) {
> + case ICE_MAC_GENERIC_3K:
> + sign_type = SEGMENT_SIGN_TYPE_RSA3K;
> + break;
> + case ICE_MAC_GENERIC:
> + default:
> + sign_type = SEGMENT_SIGN_TYPE_RSA2K;
> + break;
> + }
> +
> + return sign_type;
> +}
> +
> +/**
> + * ice_get_signing_req - get correct package requirements, based on device
> + * @hw: pointer to the hardware structure
> + */
> +static void ice_get_signing_req(struct ice_hw *hw)
> +{
> + hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type);
> + hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type);
> +}
> +
> +/**
> + * ice_download_pkg_sig_seg - download a signature segment
> + * @hw: pointer to the hardware structure
> + * @seg: pointer to signature segment
> + */
> +static enum ice_ddp_state
> +ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
> +{
> + enum ice_ddp_state state;
> +
> + state = ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0,
> + LE32_TO_CPU(seg->buf_tbl.buf_count),
> + false);
> +
> + return state;
> +}
> +
> +/**
> + * ice_download_pkg_config_seg - download a config segment
> + * @hw: pointer to the hardware structure
> + * @pkg_hdr: pointer to package header
> + * @idx: segment index
> + * @start: starting buffer
> + * @count: buffer count
> + *
> + * Note: idx must reference an ICE segment
> + */
> +static enum ice_ddp_state
> +ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
> + u32 idx, u32 start, u32 count)
> +{
> + struct ice_buf_table *bufs;
> + enum ice_ddp_state state;
> + struct ice_seg *seg;
> + u32 buf_count;
> +
> + seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
> + if (!seg)
> + return ICE_DDP_PKG_ERR;
> +
> + bufs = ice_find_buf_table(seg);
> + buf_count = LE32_TO_CPU(bufs->buf_count);
> +
> + if (start >= buf_count || start + count > buf_count)
> + return ICE_DDP_PKG_ERR;
> +
> + state = ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count,
> + true);
> +
> + return state;
> +}
> +
> +/**
> + * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
> + * @hw: pointer to the hardware structure
> + * @pkg_hdr: pointer to package header
> + * @idx: segment index (must be a signature segment)
> + *
> + * Note: idx must reference a signature segment
> + */
> +static enum ice_ddp_state
> +ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
> + u32 idx)
> +{
> + enum ice_ddp_state state;
> + struct ice_sign_seg *seg;
> + u32 conf_idx;
> + u32 start;
> + u32 count;
> +
> + seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
> + if (!seg) {
> + state = ICE_DDP_PKG_ERR;
> + goto exit;
> + }
> +
> + conf_idx = LE32_TO_CPU(seg->signed_seg_idx);
> + start = LE32_TO_CPU(seg->signed_buf_start);
> + count = LE32_TO_CPU(seg->signed_buf_count);
> +
> + state = ice_download_pkg_sig_seg(hw, seg);
> + if (state)
> + goto exit;
> +
> + state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
> + count);
> +
> +exit:
> + return state;
> +}
> +
> +/**
> + * ice_match_signing_seg - determine if a matching signing segment exists
> + * @pkg_hdr: pointer to package header
> + * @seg_id: segment id that is expected
> + * @sign_type: signing type
> + */
> +static bool
> +ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type)
> +{
> + bool match = false;
> + u32 i;
> +
> + for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
> + if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id,
> + sign_type)) {
> + match = true;
> + break;
> + }
> + }
> +
> + return match;
> +}
> +
> +/**
> + * ice_post_dwnld_pkg_actions - perform post download package actions
> + * @hw: pointer to the hardware structure
> + */
> +static enum ice_ddp_state
> +ice_post_dwnld_pkg_actions(struct ice_hw *hw)
> +{
> + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
> + enum ice_status status;
> +
> + status = ice_set_vlan_mode(hw);
> + if (status) {
> + ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode:
> err %d\n",
> + status);
> + state = ICE_DDP_PKG_ERR;
> + }
> +
> + return state;
> +}
> +
> +/**
> + * ice_download_pkg_with_sig_seg - download package using signature segments
> + * @hw: pointer to the hardware structure
> + * @pkg_hdr: pointer to package header
> + */
> +static enum ice_ddp_state
> +ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
> +{
> + enum ice_aq_err aq_err = hw->adminq.sq_last_status;
> + enum ice_ddp_state state = ICE_DDP_PKG_ERR;
> + enum ice_status status;
> + u32 i;
> +
> + ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id);
> + ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw-
> >pkg_sign_type);
> +
> + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
> + if (status) {
> + if (status == ICE_ERR_AQ_NO_WORK)
> + state = ICE_DDP_PKG_ALREADY_LOADED;
> + else
> + state = ice_map_aq_err_to_ddp_state(aq_err);
> + return state;
> + }
> +
> + for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
> + if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id,
> + hw->pkg_sign_type))
> + continue;
> +
> + state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i);
> + if (state)
> + break;
> + }
> +
> + if (!state)
> + state = ice_post_dwnld_pkg_actions(hw);
> +
> + ice_release_global_cfg_lock(hw);
> +
> + return state;
> +}
> +
> +/**
> + * ice_dwnld_cfg_bufs
> + * @hw: pointer to the hardware structure
> + * @bufs: pointer to an array of buffers
> + * @count: the number of buffers in the array
> + *
> + * Obtains global config lock and downloads the package configuration buffers
> + * to the firmware.
> + */
> +static enum ice_ddp_state
> +ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
> +{
> + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
> + enum ice_status status;
> + struct ice_buf_hdr *bh;
> +
> + if (!bufs || !count)
> + return ICE_DDP_PKG_ERR;
> +
> + /* If the first buffer's first section has its metadata bit set
> + * then there are no buffers to be downloaded, and the operation is
> + * considered a success.
> + */
> + bh = (struct ice_buf_hdr *)bufs;
> + if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
> + return ICE_DDP_PKG_SUCCESS;
> +
> + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
> + if (status) {
> + if (status == ICE_ERR_AQ_NO_WORK)
> + return ICE_DDP_PKG_ALREADY_LOADED;
> + return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
> + }
> +
> + state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
> + if (!state)
> + state = ice_post_dwnld_pkg_actions(hw);
> +
> + ice_release_global_cfg_lock(hw);
> +
> + return state;
> +}
> +
> +/**
> + * ice_download_pkg_without_sig_seg
> + * @hw: pointer to the hardware structure
> + * @ice_seg: pointer to the segment of the package to be downloaded
> + *
> + * Handles the download of a complete package without signature segment.
> + */
> +static enum ice_ddp_state
> +ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg)
> +{
> + struct ice_buf_table *ice_buf_tbl;
> + enum ice_ddp_state state;
> +
> + ice_debug(hw, ICE_DBG_PKG, "Segment format
> version: %d.%d.%d.%d\n",
> + ice_seg->hdr.seg_format_ver.major,
> + ice_seg->hdr.seg_format_ver.minor,
> + ice_seg->hdr.seg_format_ver.update,
> + ice_seg->hdr.seg_format_ver.draft);
> +
> + ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
> + LE32_TO_CPU(ice_seg->hdr.seg_type),
> + LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
> +
> + ice_buf_tbl = ice_find_buf_table(ice_seg);
> +
> + ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
> + LE32_TO_CPU(ice_buf_tbl->buf_count));
> +
> + state = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
> + LE32_TO_CPU(ice_buf_tbl->buf_count));
> +
> + return state;
> +}
> +
> +/**
> + * ice_download_pkg
> + * @hw: pointer to the hardware structure
> + * @pkg_hdr: pointer to package header
> + * @ice_seg: pointer to the segment of the package to be downloaded
> + *
> + * Handles the download of a complete package.
> + */
> +static enum ice_ddp_state
> +ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
> + struct ice_seg *ice_seg)
> +{
> + enum ice_ddp_state state;
> +
> + if (hw->pkg_has_signing_seg)
> + state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
> + else
> + state = ice_download_pkg_without_sig_seg(hw, ice_seg);
> +
> + ice_post_pkg_dwnld_vlan_mode_cfg(hw);
> +
> + return state;
> +}
> +
> +/**
> + * ice_init_pkg_info
> + * @hw: pointer to the hardware structure
> + * @pkg_hdr: pointer to the driver's package hdr
> + *
> + * Saves off the package details into the HW structure.
> + */
> +static enum ice_ddp_state
> +ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
> +{
> + struct ice_generic_seg_hdr *seg_hdr;
> +
> + if (!pkg_hdr)
> + return ICE_DDP_PKG_ERR;
> +
> + hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr);
> + ice_get_signing_req(hw);
> +
> + ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
> + hw->pkg_seg_id);
> +
> + seg_hdr = (struct ice_generic_seg_hdr *)
> + ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
> + if (seg_hdr) {
> + struct ice_meta_sect *meta;
> + struct ice_pkg_enum state;
> +
> + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> +
> + /* Get package information from the Metadata Section */
> + meta = (struct ice_meta_sect *)
> + ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
> + ICE_SID_METADATA);
> + if (!meta) {
> + ice_debug(hw, ICE_DBG_INIT, "Did not find ice
> metadata section in package\n");
> + return ICE_DDP_PKG_INVALID_FILE;
> + }
> +
> + hw->pkg_ver = meta->ver;
> + ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
> + ICE_NONDMA_TO_NONDMA);
> +
> + ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
> + meta->ver.major, meta->ver.minor, meta->ver.update,
> + meta->ver.draft, meta->name);
> +
> + hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
> + ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
> + sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);
> +
> + ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
> + seg_hdr->seg_format_ver.major,
> + seg_hdr->seg_format_ver.minor,
> + seg_hdr->seg_format_ver.update,
> + seg_hdr->seg_format_ver.draft,
> + seg_hdr->seg_id);
> + } else {
> + ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in
> driver package\n");
> + return ICE_DDP_PKG_INVALID_FILE;
> + }
> +
> + return ICE_DDP_PKG_SUCCESS;
> +}
> +
> +/**
> + * ice_get_pkg_info
> + * @hw: pointer to the hardware structure
> + *
> + * Store details of the package currently loaded in HW into the HW structure.
> + */
> +enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
> +{
> + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
> + struct ice_aqc_get_pkg_info_resp *pkg_info;
> + u16 size;
> + u32 i;
> +
> + size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
> + pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
> + if (!pkg_info)
> + return ICE_DDP_PKG_ERR;
> +
> + if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
> + state = ICE_DDP_PKG_ERR;
> + goto init_pkg_free_alloc;
> + }
> +
> + for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
> +#define ICE_PKG_FLAG_COUNT 4
> + char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
> + u8 place = 0;
> +
> + if (pkg_info->pkg_info[i].is_active) {
> + flags[place++] = 'A';
> + hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
> + hw->active_track_id =
> + LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
> + ice_memcpy(hw->active_pkg_name,
> + pkg_info->pkg_info[i].name,
> + sizeof(pkg_info->pkg_info[i].name),
> + ICE_NONDMA_TO_NONDMA);
> + hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
> + }
> + if (pkg_info->pkg_info[i].is_active_at_boot)
> + flags[place++] = 'B';
> + if (pkg_info->pkg_info[i].is_modified)
> + flags[place++] = 'M';
> + if (pkg_info->pkg_info[i].is_in_nvm)
> + flags[place++] = 'N';
> +
> + ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
> + i, pkg_info->pkg_info[i].ver.major,
> + pkg_info->pkg_info[i].ver.minor,
> + pkg_info->pkg_info[i].ver.update,
> + pkg_info->pkg_info[i].ver.draft,
> + pkg_info->pkg_info[i].name, flags);
> + }
> +
> +init_pkg_free_alloc:
> + ice_free(hw, pkg_info);
> +
> + return state;
> +}
> +
> +/**
> + * ice_label_enum_handler
> + * @sect_type: section type
> + * @section: pointer to section
> + * @index: index of the label entry to be returned
> + * @offset: pointer to receive absolute offset, always zero for label sections
> + *
> + * This is a callback function that can be passed to ice_pkg_enum_entry.
> + * Handles enumeration of individual label entries.
> + */
> +static void *
> +ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
> + u32 *offset)
> +{
> + struct ice_label_section *labels;
> +
> + if (!section)
> + return NULL;
> +
> + if (index > ICE_MAX_LABELS_IN_BUF)
> + return NULL;
> +
> + if (offset)
> + *offset = 0;
> +
> + labels = (struct ice_label_section *)section;
> + if (index >= LE16_TO_CPU(labels->count))
> + return NULL;
> +
> + return labels->label + index;
> +}
> +
> +/**
> + * ice_enum_labels
> + * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
> + * @type: the section type that will contain the label (0 on subsequent calls)
> + * @state: ice_pkg_enum structure that will hold the state of the enumeration
> + * @value: pointer to a value that will return the label's value if found
> + *
> + * Enumerates a list of labels in the package. The caller will call
> + * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
> + * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
> + * the end of the list has been reached.
> + */
> +static char *
> +ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
> + u16 *value)
> +{
> + struct ice_label *label;
> +
> + /* Check for valid label section on first call */
> + if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
> + return NULL;
> +
> + label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
> + NULL,
> + ice_label_enum_handler);
> + if (!label)
> + return NULL;
> +
> + *value = LE16_TO_CPU(label->value);
> + return label->name;
> +}
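The start/continue convention documented above looks roughly like this in a caller (a sketch only; label_type stands for any section id in the ICE_SID_LBL_FIRST..ICE_SID_LBL_LAST range and the loop body is a placeholder):

  struct ice_pkg_enum state;
  char *name;
  u16 val;

  ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

  /* first call: pass the segment and the label section type */
  name = ice_enum_labels(ice_seg, label_type, &state, &val);
  while (name) {
      /* ... consume name/val, e.g. cache a hint keyed by the label ... */

      /* subsequent calls: NULL segment and type 0 continue the walk */
      name = ice_enum_labels(NULL, 0, &state, &val);
  }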
> +
> +/**
> + * ice_verify_pkg - verify package
> + * @pkg: pointer to the package buffer
> + * @len: size of the package buffer
> + *
> + * Verifies various attributes of the package file, including length, format
> + * version, and the requirement of at least one segment.
> + */
> +enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
> +{
> + u32 seg_count;
> + u32 i;
> +
> + if (len < ice_struct_size(pkg, seg_offset, 1))
> + return ICE_DDP_PKG_INVALID_FILE;
> +
> + if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
> + pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
> + pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
> + pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
> + return ICE_DDP_PKG_INVALID_FILE;
> +
> + /* pkg must have at least one segment */
> + seg_count = LE32_TO_CPU(pkg->seg_count);
> + if (seg_count < 1)
> + return ICE_DDP_PKG_INVALID_FILE;
> +
> + /* make sure segment array fits in package length */
> + if (len < ice_struct_size(pkg, seg_offset, seg_count))
> + return ICE_DDP_PKG_INVALID_FILE;
> +
> + /* all segments must fit within length */
> + for (i = 0; i < seg_count; i++) {
> + u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
> + struct ice_generic_seg_hdr *seg;
> +
> + /* segment header must fit */
> + if (len < off + sizeof(*seg))
> + return ICE_DDP_PKG_INVALID_FILE;
> +
> + seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
> +
> + /* segment body must fit */
> + if (len < off + LE32_TO_CPU(seg->seg_size))
> + return ICE_DDP_PKG_INVALID_FILE;
> + }
> +
> + return ICE_DDP_PKG_SUCCESS;
> +}
> +
> +/**
> + * ice_free_seg - free package segment pointer
> + * @hw: pointer to the hardware structure
> + *
> + * Frees the package segment pointer in the proper manner, depending on if the
> + * segment was allocated or just the passed in pointer was stored.
> + */
> +void ice_free_seg(struct ice_hw *hw)
> +{
> + if (hw->pkg_copy) {
> + ice_free(hw, hw->pkg_copy);
> + hw->pkg_copy = NULL;
> + hw->pkg_size = 0;
> + }
> + hw->seg = NULL;
> +}
> +
> +/**
> + * ice_chk_pkg_version - check package version for compatibility with driver
> + * @pkg_ver: pointer to a version structure to check
> + *
> + * Check to make sure that the package about to be downloaded is compatible with
> + * the driver. To be compatible, the major and minor components of the package
> + * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
> + * definitions.
> + */
> +static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
> +{
> + if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
> + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
> + pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
> + return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
> + else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
> + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
> + pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
> + return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
> +
> + return ICE_DDP_PKG_SUCCESS;
> +}
> +
> +/**
> + * ice_chk_pkg_compat
> + * @hw: pointer to the hardware structure
> + * @ospkg: pointer to the package hdr
> + * @seg: pointer to the package segment hdr
> + *
> + * This function checks the package version compatibility with driver and NVM
> + */
> +static enum ice_ddp_state
> +ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
> + struct ice_seg **seg)
> +{
> + struct ice_aqc_get_pkg_info_resp *pkg;
> + enum ice_ddp_state state;
> + u16 size;
> + u32 i;
> +
> + /* Check package version compatibility */
> + state = ice_chk_pkg_version(&hw->pkg_ver);
> + if (state) {
> + ice_debug(hw, ICE_DBG_INIT, "Package version check
> failed.\n");
> + return state;
> + }
> +
> + /* find ICE segment in given package */
> + *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
> + ospkg);
> + if (!*seg) {
> + ice_debug(hw, ICE_DBG_INIT, "no ice segment in
> package.\n");
> + return ICE_DDP_PKG_INVALID_FILE;
> + }
> +
> + /* Check if FW is compatible with the OS package */
> + size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
> + pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
> + if (!pkg)
> + return ICE_DDP_PKG_ERR;
> +
> + if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
> + state = ICE_DDP_PKG_ERR;
> + goto fw_ddp_compat_free_alloc;
> + }
> +
> + for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
> + /* loop till we find the NVM package */
> + if (!pkg->pkg_info[i].is_in_nvm)
> + continue;
> + if ((*seg)->hdr.seg_format_ver.major !=
> + pkg->pkg_info[i].ver.major ||
> + (*seg)->hdr.seg_format_ver.minor >
> + pkg->pkg_info[i].ver.minor) {
> + state = ICE_DDP_PKG_FW_MISMATCH;
> + ice_debug(hw, ICE_DBG_INIT, "OS package is not
> compatible with NVM.\n");
> + }
> + /* done processing NVM package so break */
> + break;
> + }
> +fw_ddp_compat_free_alloc:
> + ice_free(hw, pkg);
> + return state;
> +}
> +
> +/**
> + * ice_sw_fv_handler
> + * @sect_type: section type
> + * @section: pointer to section
> + * @index: index of the field vector entry to be returned
> + * @offset: ptr to variable that receives the offset in the field vector table
> + *
> + * This is a callback function that can be passed to ice_pkg_enum_entry.
> + * This function treats the given section as of type ice_sw_fv_section and
> + * enumerates offset field. "offset" is an index into the field vector table.
> + */
> +static void *
> +ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
> +{
> + struct ice_sw_fv_section *fv_section =
> + (struct ice_sw_fv_section *)section;
> +
> + if (!section || sect_type != ICE_SID_FLD_VEC_SW)
> + return NULL;
> + if (index >= LE16_TO_CPU(fv_section->count))
> + return NULL;
> + if (offset)
> + /* "index" passed in to this function is relative to a given
> + * 4k block. To get to the true index into the field vector
> + * table need to add the relative index to the base_offset
> + * field of this section
> + */
> + *offset = LE16_TO_CPU(fv_section->base_offset) + index;
> + return fv_section->fv + index;
> +}
> +
> +/**
> + * ice_get_prof_index_max - get the max profile index for used profile
> + * @hw: pointer to the HW struct
> + *
> + * Calling this function will get the max profile index for used profile
> + * and store the index number in struct ice_switch_info *switch_info
> + * in hw for following use.
> + */
> +static int ice_get_prof_index_max(struct ice_hw *hw)
> +{
> + u16 prof_index = 0, j, max_prof_index = 0;
> + struct ice_pkg_enum state;
> + struct ice_seg *ice_seg;
> + bool flag = false;
> + struct ice_fv *fv;
> + u32 offset;
> +
> + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> +
> + if (!hw->seg)
> + return ICE_ERR_PARAM;
> +
> + ice_seg = hw->seg;
> +
> + do {
> + fv = (struct ice_fv *)
> + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
> + &offset, ice_sw_fv_handler);
> + if (!fv)
> + break;
> + ice_seg = NULL;
> +
> + /* in a profile that is not in use, the prot_id is set to 0xff
> + * and the off is set to 0x1ff for all the field vectors.
> + */
> + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
> + if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
> + fv->ew[j].off != ICE_FV_OFFSET_INVAL)
> + flag = true;
> + if (flag && prof_index > max_prof_index)
> + max_prof_index = prof_index;
> +
> + prof_index++;
> + flag = false;
> + } while (fv);
> +
> + hw->switch_info->max_used_prof_index = max_prof_index;
> +
> + return ICE_SUCCESS;
> +}
> +
> +/**
> + * ice_get_ddp_pkg_state - get DDP pkg state after download
> + * @hw: pointer to the HW struct
> + * @already_loaded: indicates if pkg was already loaded onto the device
> + *
> + */
> +static enum ice_ddp_state
> +ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
> +{
> + if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
> + hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
> + hw->pkg_ver.update == hw->active_pkg_ver.update &&
> + hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
> + !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
> + if (already_loaded)
> + return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
> + else
> + return ICE_DDP_PKG_SUCCESS;
> + } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
> + hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
> + return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
> + } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
> + hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
> + return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
> + } else {
> + return ICE_DDP_PKG_ERR;
> + }
> +}
> +
> +/**
> + * ice_init_pkg_regs - initialize additional package registers
> + * @hw: pointer to the hardware structure
> + */
> +static void ice_init_pkg_regs(struct ice_hw *hw)
> +{
> +#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
> +#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
> +#define ICE_SW_BLK_IDX 0
> + if (hw->dcf_enabled)
> + return;
> +
> + /* setup Switch block input mask, which is 48-bits in two parts */
> + wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
> + wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
> +}
> +
> +/**
> + * ice_hw_ptype_ena - check if the PTYPE is enabled or not
> + * @hw: pointer to the HW structure
> + * @ptype: the hardware PTYPE
> + */
> +bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
> +{
> + return ptype < ICE_FLOW_PTYPE_MAX &&
> + ice_is_bit_set(hw->hw_ptype, ptype);
> +}
> +
> +/**
> + * ice_marker_ptype_tcam_handler
> + * @sect_type: section type
> + * @section: pointer to section
> + * @index: index of the Marker PType TCAM entry to be returned
> + * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
> + *
> + * This is a callback function that can be passed to ice_pkg_enum_entry.
> + * Handles enumeration of individual Marker PType TCAM entries.
> + */
> +static void *
> +ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index,
> + u32 *offset)
> +{
> + struct ice_marker_ptype_tcam_section *marker_ptype;
> +
> + if (!section)
> + return NULL;
> +
> + if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
> + return NULL;
> +
> + if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
> + return NULL;
> +
> + if (offset)
> + *offset = 0;
> +
> + marker_ptype = (struct ice_marker_ptype_tcam_section *)section;
> + if (index >= LE16_TO_CPU(marker_ptype->count))
> + return NULL;
> +
> + return marker_ptype->tcam + index;
> +}
> +
> +/**
> + * ice_fill_hw_ptype - fill the enabled PTYPE bit information
> + * @hw: pointer to the HW structure
> + */
> +static void
> +ice_fill_hw_ptype(struct ice_hw *hw)
> +{
> + struct ice_marker_ptype_tcam_entry *tcam;
> + struct ice_seg *seg = hw->seg;
> + struct ice_pkg_enum state;
> +
> + ice_zero_bitmap(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
> + if (!seg)
> + return;
> +
> + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> +
> + do {
> + tcam = (struct ice_marker_ptype_tcam_entry *)
> + ice_pkg_enum_entry(seg, &state,
> + ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
> + ice_marker_ptype_tcam_handler);
> + if (tcam &&
> + LE16_TO_CPU(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
> + LE16_TO_CPU(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
> + ice_set_bit(LE16_TO_CPU(tcam->ptype), hw->hw_ptype);
> +
> + seg = NULL;
> + } while (tcam);
> +}
> +
> +/**
> + * ice_init_pkg - initialize/download package
> + * @hw: pointer to the hardware structure
> + * @buf: pointer to the package buffer
> + * @len: size of the package buffer
> + *
> + * This function initializes a package. The package contains HW tables
> + * required to do packet processing. First, the function extracts package
> + * information such as version. Then it finds the ice configuration segment
> + * within the package; this function then saves a copy of the segment pointer
> + * within the supplied package buffer. Next, the function will cache any hints
> + * from the package, followed by downloading the package itself. Note, that if
> + * a previous PF driver has already downloaded the package successfully, then
> + * the current driver will not have to download the package again.
> + *
> + * The local package contents will be used to query default behavior and to
> + * update specific sections of the HW's version of the package (e.g. to update
> + * the parse graph to understand new protocols).
> + *
> + * This function stores a pointer to the package buffer memory, and it is
> + * expected that the supplied buffer will not be freed immediately. If the
> + * package buffer needs to be freed, such as when read from a file, use
> + * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
> + * case.
> + */
> +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
> +{
> + bool already_loaded = false;
> + enum ice_ddp_state state;
> + struct ice_pkg_hdr *pkg;
> + struct ice_seg *seg;
> +
> + if (!buf || !len)
> + return ICE_DDP_PKG_ERR;
> +
> + pkg = (struct ice_pkg_hdr *)buf;
> + state = ice_verify_pkg(pkg, len);
> + if (state) {
> + ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg
> (err: %d)\n",
> + state);
> + return state;
> + }
> +
> + /* initialize package info */
> + state = ice_init_pkg_info(hw, pkg);
> + if (state)
> + return state;
> +
> + /* For packages with signing segments, must be a matching segment */
> + if (hw->pkg_has_signing_seg)
> + if (!ice_match_signing_seg(pkg, hw->pkg_seg_id,
> + hw->pkg_sign_type))
> + return ICE_DDP_PKG_ERR;
> +
> + /* before downloading the package, check package version for
> + * compatibility with driver
> + */
> + state = ice_chk_pkg_compat(hw, pkg, &seg);
> + if (state)
> + return state;
> +
> + /* initialize package hints and then download package */
> + ice_init_pkg_hints(hw, seg);
> + state = ice_download_pkg(hw, pkg, seg);
> +
> + if (state == ICE_DDP_PKG_ALREADY_LOADED) {
> + ice_debug(hw, ICE_DBG_INIT, "package previously loaded -
> no work.\n");
> + already_loaded = true;
> + }
> +
> + /* Get information on the package currently loaded in HW, then make sure
> + * the driver is compatible with this version.
> + */
> + if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
> + state = ice_get_pkg_info(hw);
> + if (!state)
> + state = ice_get_ddp_pkg_state(hw, already_loaded);
> + }
> +
> + if (ice_is_init_pkg_successful(state)) {
> + hw->seg = seg;
> + /* on successful package download update other required
> + * registers to support the package and fill HW tables
> + * with package content.
> + */
> + ice_init_pkg_regs(hw);
> + ice_fill_blk_tbls(hw);
> + ice_fill_hw_ptype(hw);
> + ice_get_prof_index_max(hw);
> + } else {
> + ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
> + state);
> + }
> +
> + return state;
> +}
> +
> +/**
> + * ice_copy_and_init_pkg - initialize/download a copy of the package
> + * @hw: pointer to the hardware structure
> + * @buf: pointer to the package buffer
> + * @len: size of the package buffer
> + *
> + * This function copies the package buffer, and then calls ice_init_pkg() to
> + * initialize the copied package contents.
> + *
> + * The copying is necessary if the package buffer supplied is constant, or if
> + * the memory may disappear shortly after calling this function.
> + *
> + * If the package buffer resides in the data segment and can be modified, the
> + * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
> + *
> + * However, if the package buffer needs to be copied first, such as when being
> + * read from a file, the caller should use ice_copy_and_init_pkg().
> + *
> + * This function will first copy the package buffer, before calling
> + * ice_init_pkg(). The caller is free to immediately destroy the original
> + * package buffer, as the new copy will be managed by this function and
> + * related routines.
> + */
> +enum ice_ddp_state
> +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
> +{
> + enum ice_ddp_state state;
> + u8 *buf_copy;
> +
> + if (!buf || !len)
> + return ICE_DDP_PKG_ERR;
> +
> + buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
> +
> + state = ice_init_pkg(hw, buf_copy, len);
> + if (!ice_is_init_pkg_successful(state)) {
> + /* Free the copy, since we failed to initialize the package */
> + ice_free(hw, buf_copy);
> + } else {
> + /* Track the copied pkg so we can free it later */
> + hw->pkg_copy = buf_copy;
> + hw->pkg_size = len;
> + }
> +
> + return state;
> +}
> +
> +/**
> + * ice_is_init_pkg_successful - check if DDP init was successful
> + * @state: state of the DDP pkg after download
> + */
> +bool ice_is_init_pkg_successful(enum ice_ddp_state state)
> +{
> + switch (state) {
> + case ICE_DDP_PKG_SUCCESS:
> + case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
> + case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
> + return true;
> + default:
> + return false;
> + }
> +}
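For reference, the expected caller flow once these helpers land; a sketch assuming buf/len hold a DDP file already read from disk (the PMD's real load path and error mapping live outside this file):

  enum ice_ddp_state state;

  /* ice_copy_and_init_pkg() keeps its own copy in hw->pkg_copy, so the
   * file buffer may be freed immediately after the call.
   */
  state = ice_copy_and_init_pkg(hw, buf, len);
  if (!ice_is_init_pkg_successful(state)) {
      /* state distinguishes e.g. version-too-high from bad signature */
      return -EIO;
  }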
> +
> +/**
> + * ice_pkg_buf_alloc
> + * @hw: pointer to the HW structure
> + *
> + * Allocates a package buffer and returns a pointer to the buffer header.
> + * Note: all package contents must be in Little Endian form.
> + */
> +struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
> +{
> + struct ice_buf_build *bld;
> + struct ice_buf_hdr *buf;
> +
> + bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
> + if (!bld)
> + return NULL;
> +
> + buf = (struct ice_buf_hdr *)bld;
> + buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
> + section_entry));
> + return bld;
> +}
> +
> +/**
> + * ice_get_sw_prof_type - determine switch profile type
> + * @hw: pointer to the HW structure
> + * @fv: pointer to the switch field vector
> + */
> +static enum ice_prof_type
> +ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
> +{
> + bool valid_prof = false;
> + u16 i;
> +
> + for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
> + if (fv->ew[i].off != ICE_NAN_OFFSET)
> + valid_prof = true;
> +
> + /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
> + if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
> + fv->ew[i].off == ICE_VNI_OFFSET)
> + return ICE_PROF_TUN_UDP;
> +
> + /* GRE tunnel will have GRE protocol */
> + if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
> + return ICE_PROF_TUN_GRE;
> +
> + /* PPPOE tunnel will have PPPOE protocol */
> + if (fv->ew[i].prot_id == (u8)ICE_PROT_PPPOE)
> + return ICE_PROF_TUN_PPPOE;
> + }
> +
> + return valid_prof ? ICE_PROF_NON_TUN : ICE_PROF_INVALID;
> +}
> +
> +/**
> + * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
> + * @hw: pointer to hardware structure
> + * @req_profs: type of profiles requested
> + * @bm: pointer to memory for returning the bitmap of field vectors
> + */
> +void
> +ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
> + ice_bitmap_t *bm)
> +{
> + struct ice_pkg_enum state;
> + struct ice_seg *ice_seg;
> + struct ice_fv *fv;
> +
> + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> + ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
> + ice_seg = hw->seg;
> + do {
> + enum ice_prof_type prof_type;
> + u32 offset;
> +
> + fv = (struct ice_fv *)
> + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
> + &offset, ice_sw_fv_handler);
> + ice_seg = NULL;
> +
> + if (fv) {
> + /* Determine field vector type */
> + prof_type = ice_get_sw_prof_type(hw, fv);
> +
> + if (req_profs & prof_type)
> + ice_set_bit((u16)offset, bm);
> + }
> + } while (fv);
> +}
> +
> +/**
> + * ice_get_sw_fv_list
> + * @hw: pointer to the HW structure
> + * @lkups: lookup elements or match criteria for the advanced recipe, one
> + * structure per protocol header
> + * @bm: bitmap of field vectors to consider
> + * @fv_list: Head of a list
> + *
> + * Finds all the field vector entries from switch block that contain
> + * a given protocol ID and offset and returns a list of structures of type
> + * "ice_sw_fv_list_entry". Every structure in the list has a field vector
> + * definition and profile ID information
> + * NOTE: The caller of the function is responsible for freeing the memory
> + * allocated for every list entry.
> + */
> +enum ice_status
> +ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
> + ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
> +{
> + struct ice_sw_fv_list_entry *fvl;
> + struct ice_sw_fv_list_entry *tmp;
> + struct ice_pkg_enum state;
> + struct ice_seg *ice_seg;
> + struct ice_fv *fv;
> + u32 offset;
> +
> + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> +
> + if (!lkups->n_val_words || !hw->seg)
> + return ICE_ERR_PARAM;
> +
> + ice_seg = hw->seg;
> + do {
> + u16 i;
> +
> + fv = (struct ice_fv *)
> + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
> + &offset, ice_sw_fv_handler);
> + if (!fv)
> + break;
> + ice_seg = NULL;
> +
> + /* If field vector is not in the bitmap list, then skip this
> + * profile.
> + */
> + if (!ice_is_bit_set(bm, (u16)offset))
> + continue;
> +
> + for (i = 0; i < lkups->n_val_words; i++) {
> + int j;
> +
> + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
> + if (fv->ew[j].prot_id ==
> + lkups->fv_words[i].prot_id &&
> + fv->ew[j].off == lkups->fv_words[i].off)
> + break;
> + if (j >= hw->blk[ICE_BLK_SW].es.fvw)
> + break;
> + if (i + 1 == lkups->n_val_words) {
> + fvl = (struct ice_sw_fv_list_entry *)
> + ice_malloc(hw, sizeof(*fvl));
> + if (!fvl)
> + goto err;
> + fvl->fv_ptr = fv;
> + fvl->profile_id = offset;
> + LIST_ADD(&fvl->list_entry, fv_list);
> + break;
> + }
> + }
> + } while (fv);
> + if (LIST_EMPTY(fv_list))
> + return ICE_ERR_CFG;
> + return ICE_SUCCESS;
> +
> +err:
> + LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
> + list_entry) {
> + LIST_DEL(&fvl->list_entry);
> + ice_free(hw, fvl);
> + }
> +
> + return ICE_ERR_NO_MEMORY;
> +}
> +
> +/**
> + * ice_init_prof_result_bm - Initialize the profile result index bitmap
> + * @hw: pointer to hardware structure
> + */
> +void ice_init_prof_result_bm(struct ice_hw *hw)
> +{
> + struct ice_pkg_enum state;
> + struct ice_seg *ice_seg;
> + struct ice_fv *fv;
> +
> + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> +
> + if (!hw->seg)
> + return;
> +
> + ice_seg = hw->seg;
> + do {
> + u32 off;
> + u16 i;
> +
> + fv = (struct ice_fv *)
> + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
> + &off, ice_sw_fv_handler);
> + ice_seg = NULL;
> + if (!fv)
> + break;
> +
> + ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
> + ICE_MAX_FV_WORDS);
> +
> + /* Determine empty field vector indices, these can be
> + * used for recipe results. Skip index 0, since it is
> + * always used for Switch ID.
> + */
> + for (i = 1; i < ICE_MAX_FV_WORDS; i++)
> + if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
> + fv->ew[i].off == ICE_FV_OFFSET_INVAL)
> + ice_set_bit(i,
> + hw->switch_info->prof_res_bm[off]);
> + } while (fv);
> +}
> +
> +/**
> + * ice_pkg_buf_free
> + * @hw: pointer to the HW structure
> + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
> + *
> + * Frees a package buffer
> + */
> +void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
> +{
> + ice_free(hw, bld);
> +}
> +
> +/**
> + * ice_pkg_buf_reserve_section
> + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
> + * @count: the number of sections to reserve
> + *
> + * Reserves one or more section table entries in a package buffer. This routine
> + * can be called multiple times as long as they are made before calling
> + * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
> + * is called once, the number of sections that can be allocated will not be able
> + * to be increased; not using all reserved sections is fine, but this will
> + * result in some wasted space in the buffer.
> + * Note: all package contents must be in Little Endian form.
> + */
> +enum ice_status
> +ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
> +{
> + struct ice_buf_hdr *buf;
> + u16 section_count;
> + u16 data_end;
> +
> + if (!bld)
> + return ICE_ERR_PARAM;
> +
> + buf = (struct ice_buf_hdr *)&bld->buf;
> +
> + /* already an active section, can't increase table size */
> + section_count = LE16_TO_CPU(buf->section_count);
> + if (section_count > 0)
> + return ICE_ERR_CFG;
> +
> + if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
> + return ICE_ERR_CFG;
> + bld->reserved_section_table_entries += count;
> +
> + data_end = LE16_TO_CPU(buf->data_end) +
> + FLEX_ARRAY_SIZE(buf, section_entry, count);
> + buf->data_end = CPU_TO_LE16(data_end);
> +
> + return ICE_SUCCESS;
> +}
> +
> +/**
> + * ice_pkg_buf_alloc_section
> + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
> + * @type: the section type value
> + * @size: the size of the section to reserve (in bytes)
> + *
> + * Reserves memory in the buffer for a section's content and updates the
> + * buffers' status accordingly. This routine returns a pointer to the first
> + * byte of the section start within the buffer, which is used to fill in the
> + * section contents.
> + * Note: all package contents must be in Little Endian form.
> + */
> +void *
> +ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
> +{
> + struct ice_buf_hdr *buf;
> + u16 sect_count;
> + u16 data_end;
> +
> + if (!bld || !type || !size)
> + return NULL;
> +
> + buf = (struct ice_buf_hdr *)&bld->buf;
> +
> + /* check for enough space left in buffer */
> + data_end = LE16_TO_CPU(buf->data_end);
> +
> + /* section start must align on 4 byte boundary */
> + data_end = ICE_ALIGN(data_end, 4);
> +
> + if ((data_end + size) > ICE_MAX_S_DATA_END)
> + return NULL;
> +
> + /* check for more available section table entries */
> + sect_count = LE16_TO_CPU(buf->section_count);
> + if (sect_count < bld->reserved_section_table_entries) {
> + void *section_ptr = ((u8 *)buf) + data_end;
> +
> + buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
> + buf->section_entry[sect_count].size = CPU_TO_LE16(size);
> + buf->section_entry[sect_count].type = CPU_TO_LE32(type);
> +
> + data_end += size;
> + buf->data_end = CPU_TO_LE16(data_end);
> +
> + buf->section_count = CPU_TO_LE16(sect_count + 1);
> + return section_ptr;
> + }
> +
> + /* no free section table entries */
> + return NULL;
> +}
> +
> +/**
> + * ice_pkg_buf_alloc_single_section
> + * @hw: pointer to the HW structure
> + * @type: the section type value
> + * @size: the size of the section to reserve (in bytes)
> + * @section: returns pointer to the section
> + *
> + * Allocates a package buffer with a single section.
> + * Note: all package contents must be in Little Endian form.
> + */
> +struct ice_buf_build *
> +ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
> + void **section)
> +{
> + struct ice_buf_build *buf;
> +
> + if (!section)
> + return NULL;
> +
> + buf = ice_pkg_buf_alloc(hw);
> + if (!buf)
> + return NULL;
> +
> + if (ice_pkg_buf_reserve_section(buf, 1))
> + goto ice_pkg_buf_alloc_single_section_err;
> +
> + *section = ice_pkg_buf_alloc_section(buf, type, size);
> + if (!*section)
> + goto ice_pkg_buf_alloc_single_section_err;
> +
> + return buf;
> +
> +ice_pkg_buf_alloc_single_section_err:
> + ice_pkg_buf_free(hw, buf);
> + return NULL;
> +}
> +
> +/**
> + * ice_pkg_buf_get_active_sections
> + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
> + *
> + * Returns the number of active sections. Before using the package buffer
> + * in an update package command, the caller should make sure that there is at
> + * least one active section - otherwise, the buffer is not legal and should
> + * not be used.
> + * Note: all package contents must be in Little Endian form.
> + */
> +u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
> +{
> + struct ice_buf_hdr *buf;
> +
> + if (!bld)
> + return 0;
> +
> + buf = (struct ice_buf_hdr *)&bld->buf;
> + return LE16_TO_CPU(buf->section_count);
> +}
> +
> +/**
> + * ice_pkg_buf
> + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
> + *
> + * Return a pointer to the buffer's header
> + */
> +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
> +{
> + if (bld)
> + return &bld->buf;
> +
> + return NULL;
> +}
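Taken together, the buffer-build helpers above are meant to be used in this order; a sketch under the assumption that the caller knows its section type and size (the function name is hypothetical and error handling is trimmed):

  static enum ice_status
  ice_example_update_one_section(struct ice_hw *hw, u32 sect_type, u16 sect_size)
  {
      enum ice_status status = ICE_ERR_CFG;
      struct ice_buf_build *bld;
      void *sect;

      bld = ice_pkg_buf_alloc(hw);
      if (!bld)
          return ICE_ERR_NO_MEMORY;

      /* reserve the section table entry first, then carve out the body */
      if (ice_pkg_buf_reserve_section(bld, 1))
          goto out;
      sect = ice_pkg_buf_alloc_section(bld, sect_type, sect_size);
      if (!sect)
          goto out;

      /* ... fill "sect" with little-endian section content here ... */

      /* never send a buffer with no active section to firmware */
      if (ice_pkg_buf_get_active_sections(bld))
          status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
  out:
      ice_pkg_buf_free(hw, bld);
      return status;
  }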
> +
> +/**
> + * ice_find_buf_table
> + * @ice_seg: pointer to the ice segment
> + *
> + * Returns the address of the buffer table within the ice segment.
> + */
> +struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
> +{
> + struct ice_nvm_table *nvms;
> +
> + nvms = (struct ice_nvm_table *)
> + (ice_seg->device_table +
> + LE32_TO_CPU(ice_seg->device_table_count));
> +
> + return (_FORCE_ struct ice_buf_table *)
> + (nvms->vers + LE32_TO_CPU(nvms->table_count));
> +}
> +
> +/**
> + * ice_pkg_val_buf
> + * @buf: pointer to the ice buffer
> + *
> + * This helper function validates a buffer's header.
> + */
> +static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
> +{
> + struct ice_buf_hdr *hdr;
> + u16 section_count;
> + u16 data_end;
> +
> + hdr = (struct ice_buf_hdr *)buf->buf;
> + /* verify data */
> + section_count = LE16_TO_CPU(hdr->section_count);
> + if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
> + return NULL;
> +
> + data_end = LE16_TO_CPU(hdr->data_end);
> + if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
> + return NULL;
> +
> + return hdr;
> +}
> +
> +/**
> + * ice_pkg_enum_buf
> + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
> + * @state: pointer to the enum state
> + *
> + * This function will enumerate all the buffers in the ice segment. The first
> + * call is made with the ice_seg parameter non-NULL; on subsequent calls,
> + * ice_seg is set to NULL which continues the enumeration. When the function
> + * returns a NULL pointer, then the end of the buffers has been reached, or an
> + * unexpected value has been detected (for example an invalid section count or
> + * an invalid buffer end value).
> + */
> +struct ice_buf_hdr *
> +ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
> +{
> + if (ice_seg) {
> + state->buf_table = ice_find_buf_table(ice_seg);
> + if (!state->buf_table)
> + return NULL;
> +
> + state->buf_idx = 0;
> + return ice_pkg_val_buf(state->buf_table->buf_array);
> + }
> +
> + if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
> + return ice_pkg_val_buf(state->buf_table->buf_array +
> + state->buf_idx);
> + else
> + return NULL;
> +}
> +
> +/**
> + * ice_pkg_advance_sect
> + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
> + * @state: pointer to the enum state
> + *
> + * This helper function will advance the section within the ice segment,
> + * also advancing the buffer if needed.
> + */
> +bool
> +ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
> +{
> + if (!ice_seg && !state->buf)
> + return false;
> +
> + if (!ice_seg && state->buf)
> + if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
> + return true;
> +
> + state->buf = ice_pkg_enum_buf(ice_seg, state);
> + if (!state->buf)
> + return false;
> +
> + /* start of new buffer, reset section index */
> + state->sect_idx = 0;
> + return true;
> +}
> +
> +/**
> + * ice_pkg_enum_section
> + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
> + * @state: pointer to the enum state
> + * @sect_type: section type to enumerate
> + *
> + * This function will enumerate all the sections of a particular type in the
> + * ice segment. The first call is made with the ice_seg parameter non-NULL;
> + * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
> + * When the function returns a NULL pointer, then the end of the matching
> + * sections has been reached.
> + */
> +void *
> +ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
> + u32 sect_type)
> +{
> + u16 offset, size;
> +
> + if (ice_seg)
> + state->type = sect_type;
> +
> + if (!ice_pkg_advance_sect(ice_seg, state))
> + return NULL;
> +
> + /* scan for next matching section */
> + while (state->buf->section_entry[state->sect_idx].type !=
> + CPU_TO_LE32(state->type))
> + if (!ice_pkg_advance_sect(NULL, state))
> + return NULL;
> +
> + /* validate section */
> + offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
> + if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
> + return NULL;
> +
> + size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
> + if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
> + return NULL;
> +
> + /* make sure the section fits in the buffer */
> + if (offset + size > ICE_PKG_BUF_SIZE)
> + return NULL;
> +
> + state->sect_type =
> + LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
> +
> + /* calc pointer to this section */
> + state->sect = ((u8 *)state->buf) +
> + LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
> +
> + return state->sect;
> +}
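
Section-level enumeration follows the same first-call/continuation convention
as the buffer walk above; a small sketch that just counts matching sections
(the counting loop is illustrative, not part of this patch):

static u32 example_count_sections(struct ice_seg *ice_seg, u32 sect_type)
{
	struct ice_pkg_enum state;
	u32 count = 0;
	void *sect;

	/* the enum state must start zeroed */
	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);

	sect = ice_pkg_enum_section(ice_seg, &state, sect_type);
	while (sect) {
		count++;
		/* passing NULL continues the enumeration started above */
		sect = ice_pkg_enum_section(NULL, &state, 0);
	}

	return count;
}
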
> +
> +/**
> + * ice_pkg_enum_entry
> + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
> + * @state: pointer to the enum state
> + * @sect_type: section type to enumerate
> + * @offset: pointer to variable that receives the offset in the table (optional)
> + * @handler: function that handles access to the entries into the section type
> + *
> + * This function will enumerate all the entries in a particular section type in
> + * the ice segment. The first call is made with the ice_seg parameter non-NULL;
> + * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
> + * When the function returns a NULL pointer, then the end of the entries has
> + * been reached.
> + *
> + * Since each section may have a different header and entry size, the handler
> + * function is needed to determine the number and location of entries in each
> + * section.
> + *
> + * The offset parameter is optional, but should be used for sections that
> + * contain an offset for each section table. For such cases, the section handler
> + * function must return the appropriate offset + index to give the absolute
> + * offset for each entry. For example, if the base for a section's header
> + * indicates a base offset of 10, and the index for the entry is 2, then the
> + * section handler function should set the offset to 10 + 2 = 12.
> + */
> +void *
> +ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
> + u32 sect_type, u32 *offset,
> + void *(*handler)(u32 sect_type, void *section,
> + u32 index, u32 *offset))
> +{
> + void *entry;
> +
> + if (ice_seg) {
> + if (!handler)
> + return NULL;
> +
> + if (!ice_pkg_enum_section(ice_seg, state, sect_type))
> + return NULL;
> +
> + state->entry_idx = 0;
> + state->handler = handler;
> + } else {
> + state->entry_idx++;
> + }
> +
> + if (!state->handler)
> + return NULL;
> +
> + /* get entry */
> + entry = state->handler(state->sect_type, state->sect, state->entry_idx,
> + offset);
> + if (!entry) {
> + /* end of a section, look for another section of this type */
> + if (!ice_pkg_enum_section(NULL, state, 0))
> + return NULL;
> +
> + state->entry_idx = 0;
> + entry = state->handler(state->sect_type, state->sect,
> + state->entry_idx, offset);
> + }
> +
> + return entry;
> +}
> +
> +/**
> + * ice_boost_tcam_handler
> + * @sect_type: section type
> + * @section: pointer to section
> + * @index: index of the boost TCAM entry to be returned
> + * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
> + *
> + * This is a callback function that can be passed to ice_pkg_enum_entry.
> + * Handles enumeration of individual boost TCAM entries.
> + */
> +static void *
> +ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
> +{
> + struct ice_boost_tcam_section *boost;
> +
> + if (!section)
> + return NULL;
> +
> + if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
> + return NULL;
> +
> + if (index > ICE_MAX_BST_TCAMS_IN_BUF)
> + return NULL;
> +
> + if (offset)
> + *offset = 0;
> +
> + boost = (struct ice_boost_tcam_section *)section;
> + if (index >= LE16_TO_CPU(boost->count))
> + return NULL;
> +
> + return boost->tcam + index;
> +}
> +
> +/**
> + * ice_find_boost_entry
> + * @ice_seg: pointer to the ice segment (non-NULL)
> + * @addr: Boost TCAM address of entry to search for
> + * @entry: returns pointer to the entry
> + *
> + * Finds a particular Boost TCAM entry and returns a pointer to that entry
> + * if it is found. The ice_seg parameter must not be NULL since the first call
> + * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
> + */
> +static enum ice_status
> +ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
> + struct ice_boost_tcam_entry **entry)
> +{
> + struct ice_boost_tcam_entry *tcam;
> + struct ice_pkg_enum state;
> +
> + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> +
> + if (!ice_seg)
> + return ICE_ERR_PARAM;
> +
> + do {
> + tcam = (struct ice_boost_tcam_entry *)
> + ice_pkg_enum_entry(ice_seg, &state,
> + ICE_SID_RXPARSER_BOOST_TCAM, NULL,
> + ice_boost_tcam_handler);
> + if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
> + *entry = tcam;
> + return ICE_SUCCESS;
> + }
> +
> + ice_seg = NULL;
> + } while (tcam);
> +
> + *entry = NULL;
> + return ICE_ERR_CFG;
> +}
> +
> +/**
> + * ice_init_pkg_hints
> + * @hw: pointer to the HW structure
> + * @ice_seg: pointer to the segment of the package scan (non-NULL)
> + *
> + * This function will scan the package and save off relevant information
> + * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
> + * since the first call to ice_enum_labels requires a pointer to an actual
> + * ice_seg structure.
> + */
> +void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
> +{
> + struct ice_pkg_enum state;
> + char *label_name;
> + u16 val;
> + int i;
> +
> + ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
> + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> +
> + if (!ice_seg)
> + return;
> +
> + label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
> + &val);
> +
> + while (label_name) {
> +/* TODO: Replace !strncmp() with wrappers like match_some_pre() */
> + if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
> + /* check for a tunnel entry */
> + ice_add_tunnel_hint(hw, label_name, val);
> +
> + /* check for a dvm mode entry */
> + else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
> + ice_add_dvm_hint(hw, val, true);
> +
> + /* check for a svm mode entry */
> + else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
> + ice_add_dvm_hint(hw, val, false);
> +
> + label_name = ice_enum_labels(NULL, 0, &state, &val);
> + }
> +
> + /* Cache the appropriate boost TCAM entry pointers for tunnels */
> + for (i = 0; i < hw->tnl.count; i++) {
> + ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
> + &hw->tnl.tbl[i].boost_entry);
> + if (hw->tnl.tbl[i].boost_entry)
> + hw->tnl.tbl[i].valid = true;
> + }
> +
> + /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
> + for (i = 0; i < hw->dvm_upd.count; i++)
> + ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
> + &hw->dvm_upd.tbl[i].boost_entry);
> +}
> +
> +/**
> + * ice_acquire_global_cfg_lock
> + * @hw: pointer to the HW structure
> + * @access: access type (read or write)
> + *
> + * This function will request ownership of the global config lock for reading
> + * or writing of the package. When attempting to obtain write access, the
> + * caller must check for the following two return values:
> + *
> + * ICE_SUCCESS - Means the caller has acquired the global config lock
> + * and can perform writing of the package.
> + * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
> + * package or has found that no update was necessary; in
> + * this case, the caller can just skip performing any
> + * update of the package.
> + */
> +enum ice_status
> +ice_acquire_global_cfg_lock(struct ice_hw *hw,
> + enum ice_aq_res_access_type access)
> +{
> + enum ice_status status;
> +
> + status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
> + ICE_GLOBAL_CFG_LOCK_TIMEOUT);
> +
> + if (status == ICE_ERR_AQ_NO_WORK)
> + ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
> +
> + return status;
> +}
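
So a write-side caller is expected to treat ICE_ERR_AQ_NO_WORK as "someone
else already did it" rather than as a failure; roughly like the sketch below
(the download step is elided, this only illustrates the locking contract):

static enum ice_status example_write_pkg(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
	if (status == ICE_ERR_AQ_NO_WORK)
		return ICE_SUCCESS;	/* another PF already wrote the package */
	if (status)
		return status;

	/* ... issue the Download Package buffers here ... */

	ice_release_global_cfg_lock(hw);
	return ICE_SUCCESS;
}
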
> +
> +/**
> + * ice_release_global_cfg_lock
> + * @hw: pointer to the HW structure
> + *
> + * This function will release the global config lock.
> + */
> +void ice_release_global_cfg_lock(struct ice_hw *hw)
> +{
> + ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
> +}
> +
> +/**
> + * ice_acquire_change_lock
> + * @hw: pointer to the HW structure
> + * @access: access type (read or write)
> + *
> + * This function will request ownership of the change lock.
> + */
> +enum ice_status
> +ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
> +{
> + return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
> + ICE_CHANGE_LOCK_TIMEOUT);
> +}
> +
> +/**
> + * ice_release_change_lock
> + * @hw: pointer to the HW structure
> + *
> + * This function will release the change lock using the proper Admin Command.
> + */
> +void ice_release_change_lock(struct ice_hw *hw)
> +{
> + ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
> +}
> +
> +/**
> + * ice_get_set_tx_topo - get or set tx topology
> + * @hw: pointer to the HW struct
> + * @buf: pointer to tx topology buffer
> + * @buf_size: buffer size
> + * @cd: pointer to command details structure or NULL
> + * @flags: pointer to descriptor flags
> + * @set: 0-get, 1-set topology
> + *
> + * The function will get or set tx topology
> + */
> +static enum ice_status
> +ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
> + struct ice_sq_cd *cd, u8 *flags, bool set)
> +{
> + struct ice_aqc_get_set_tx_topo *cmd;
> + struct ice_aq_desc desc;
> + enum ice_status status;
> +
> + cmd = &desc.params.get_set_tx_topo;
> + if (set) {
> + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
> + cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
> + /* requested to update a new topology, not a default topology */
> + if (buf)
> + cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
> + ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
> + } else {
> + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
> + cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
> + }
> + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
> + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
> + if (status)
> + return status;
> + /* read the return flag values (first byte) for get operation */
> + if (!set && flags)
> + *flags = desc.params.get_set_tx_topo.set_flags;
> +
> + return ICE_SUCCESS;
> +}
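
For the get case, the interesting output is the returned flags byte rather
than the topology buffer itself; a hedged sketch of a read-only query
(ice_get_set_tx_topo() is static to this file, so a real caller would sit
next to it; the helper name is made up):

static bool example_new_topo_loaded(struct ice_hw *hw)
{
	bool loaded = false;
	u8 flags = 0;
	u8 *topo;

	topo = (u8 *)ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
	if (!topo)
		return false;

	/* set == false: read the current topology and its flags */
	if (!ice_get_set_tx_topo(hw, topo, ICE_AQ_MAX_BUF_LEN, NULL,
				 &flags, false))
		loaded = !!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW);

	ice_free(hw, topo);
	return loaded;
}
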
> +
> +/**
> + * ice_cfg_tx_topo - Initialize new tx topology if available
> + * @hw: pointer to the HW struct
> + * @buf: pointer to Tx topology buffer
> + * @len: buffer size
> + *
> + * The function will apply the new Tx topology from the package buffer
> + * if available.
> + */
> +enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
> +{
> + u8 *current_topo, *new_topo = NULL;
> + struct ice_run_time_cfg_seg *seg;
> + struct ice_buf_hdr *section;
> + struct ice_pkg_hdr *pkg_hdr;
> + enum ice_ddp_state state;
> + u16 i, size = 0, offset;
> + enum ice_status status;
> + u32 reg = 0;
> + u8 flags;
> +
> + if (!buf || !len)
> + return ICE_ERR_PARAM;
> +
> + /* Does FW support new Tx topology mode ? */
> + if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
> + ice_debug(hw, ICE_DBG_INIT, "FW doesn't support
> compatibility mode\n");
> + return ICE_ERR_NOT_SUPPORTED;
> + }
> +
> + current_topo = (u8 *)ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
> + if (!current_topo)
> + return ICE_ERR_NO_MEMORY;
> +
> + /* get the current Tx topology */
> + status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
> + &flags, false);
> + ice_free(hw, current_topo);
> +
> + if (status) {
> + ice_debug(hw, ICE_DBG_INIT, "Get current topology is
> failed\n");
> + return status;
> + }
> +
> + /* Is default topology already applied ? */
> + if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
> + hw->num_tx_sched_layers == 9) {
> + ice_debug(hw, ICE_DBG_INIT, "Loaded default topology\n");
> + /* Already default topology is loaded */
> + return ICE_ERR_ALREADY_EXISTS;
> + }
> +
> + /* Is new topology already applied ? */
> + if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
> + hw->num_tx_sched_layers == 5) {
> + ice_debug(hw, ICE_DBG_INIT, "Loaded new topology\n");
> + /* Already new topology is loaded */
> + return ICE_ERR_ALREADY_EXISTS;
> + }
> +
> + /* Is set topology issued already ? */
> + if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
> + ice_debug(hw, ICE_DBG_INIT, "Update tx topology was done
> by another PF\n");
> + /* add a small delay before exiting */
> + for (i = 0; i < 20; i++)
> + ice_msec_delay(100, true);
> + return ICE_ERR_ALREADY_EXISTS;
> + }
> +
> + /* Change the topology from new to default (5 to 9) */
> + if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
> + hw->num_tx_sched_layers == 5) {
> + ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9
> layers\n");
> + goto update_topo;
> + }
> +
> + pkg_hdr = (struct ice_pkg_hdr *)buf;
> + state = ice_verify_pkg(pkg_hdr, len);
> + if (state) {
> + ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg
> (err: %d)\n",
> + state);
> + return ICE_ERR_CFG;
> + }
> +
> + /* find run time configuration segment */
> + seg = (struct ice_run_time_cfg_seg *)
> + ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
> + if (!seg) {
> + ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is
> missing\n");
> + return ICE_ERR_CFG;
> + }
> +
> + if (LE32_TO_CPU(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
> + ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment
> count(%d) is wrong\n",
> + seg->buf_table.buf_count);
> + return ICE_ERR_CFG;
> + }
> +
> + section = ice_pkg_val_buf(seg->buf_table.buf_array);
> +
> + if (!section || LE32_TO_CPU(section->section_entry[0].type) !=
> + ICE_SID_TX_5_LAYER_TOPO) {
> + ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type
> is wrong\n");
> + return ICE_ERR_CFG;
> + }
> +
> + size = LE16_TO_CPU(section->section_entry[0].size);
> + offset = LE16_TO_CPU(section->section_entry[0].offset);
> + if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
> + ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size
> is wrong\n");
> + return ICE_ERR_CFG;
> + }
> +
> + /* make sure the section fits in the buffer */
> + if (offset + size > ICE_PKG_BUF_SIZE) {
> + ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer >
> 4K\n");
> + return ICE_ERR_CFG;
> + }
> +
> + /* Get the new topology buffer */
> + new_topo = ((u8 *)section) + offset;
> +
> +update_topo:
> + /* acquire global lock to make sure that set topology is issued
> + * by only one PF
> + */
> + status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
> + ICE_GLOBAL_CFG_LOCK_TIMEOUT);
> + if (status) {
> + ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global
> lock\n");
> + return status;
> + }
> +
> + /* check reset was triggered already or not */
> + reg = rd32(hw, GLGEN_RSTAT);
> + if (reg & GLGEN_RSTAT_DEVSTATE_M) {
> + /* Reset is in progress, re-init the hw again */
> + ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. layer
> topology might be applied already\n");
> + ice_check_reset(hw);
> + return ICE_SUCCESS;
> + }
> +
> + /* set new topology */
> + status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
> + if (status) {
> + ice_debug(hw, ICE_DBG_INIT, "Set tx topology is failed\n");
> + return status;
> + }
> +
> + /* new topology is updated, delay 1 second before issuing the CORER */
> + for (i = 0; i < 10; i++)
> + ice_msec_delay(100, true);
> + ice_reset(hw, ICE_RESET_CORER);
> + /* CORER will clear the global lock, so no explicit call
> + * required for release
> + */
> + return ICE_SUCCESS;
> +}
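
From the PMD init path the expected flow would be to read the topology
package from the DDP file and hand it to ice_cfg_tx_topo() before scheduler
setup, treating ICE_ERR_ALREADY_EXISTS and ICE_ERR_NOT_SUPPORTED as
non-fatal. A rough sketch, where example_read_ddp_file() is a made-up
stand-in for however the buffer is obtained:

static void example_apply_tx_topo(struct ice_hw *hw)
{
	enum ice_status status;
	u8 *buf = NULL;
	u32 len = 0;

	if (example_read_ddp_file(&buf, &len))	/* hypothetical loader */
		return;

	status = ice_cfg_tx_topo(hw, buf, len);
	if (status && status != ICE_ERR_ALREADY_EXISTS &&
	    status != ICE_ERR_NOT_SUPPORTED)
		ice_debug(hw, ICE_DBG_INIT, "Tx topology update failed\n");

	ice_free(hw, buf);
}
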
> diff --git a/drivers/net/ice/base/ice_ddp.h b/drivers/net/ice/base/ice_ddp.h
> new file mode 100644
> index 0000000000..53bbbe2a5a
> --- /dev/null
> +++ b/drivers/net/ice/base/ice_ddp.h
> @@ -0,0 +1,466 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2001-2022 Intel Corporation
> + */
> +
> +#ifndef _ICE_DDP_H_
> +#define _ICE_DDP_H_
> +
> +#include "ice_osdep.h"
> +#include "ice_adminq_cmd.h"
> +#include "ice_controlq.h"
> +#include "ice_status.h"
> +#include "ice_flex_type.h"
> +#include "ice_protocol_type.h"
> +
> +/* Package minimal version supported */
> +#define ICE_PKG_SUPP_VER_MAJ 1
> +#define ICE_PKG_SUPP_VER_MNR 3
> +
> +/* Package format version */
> +#define ICE_PKG_FMT_VER_MAJ 1
> +#define ICE_PKG_FMT_VER_MNR 0
> +#define ICE_PKG_FMT_VER_UPD 0
> +#define ICE_PKG_FMT_VER_DFT 0
> +
> +#define ICE_PKG_CNT 4
> +
> +enum ice_ddp_state {
> + /* Indicates that this call to ice_init_pkg
> + * successfully loaded the requested DDP package
> + */
> + ICE_DDP_PKG_SUCCESS = 0,
> +
> + /* Generic error for already loaded errors, it is mapped later to
> + * the more specific one (one of the next 3)
> + */
> + ICE_DDP_PKG_ALREADY_LOADED = -1,
> +
> + /* Indicates that a DDP package of the same version has already been
> + * loaded onto the device by a previous call or by another PF
> + */
> + ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
> +
> + /* The device has a DDP package that is not supported by the driver */
> + ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
> +
> + /* The device has a compatible package
> + * (but different from the request) already loaded
> + */
> + ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
> +
> + /* The firmware loaded on the device is not compatible with
> + * the DDP package loaded
> + */
> + ICE_DDP_PKG_FW_MISMATCH = -5,
> +
> + /* The DDP package file is invalid */
> + ICE_DDP_PKG_INVALID_FILE = -6,
> +
> + /* The version of the DDP package provided is higher than
> + * the driver supports
> + */
> + ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
> +
> + /* The version of the DDP package provided is lower than the
> + * driver supports
> + */
> + ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
> +
> + /* Missing security manifest in DDP pkg */
> + ICE_DDP_PKG_NO_SEC_MANIFEST = -9,
> +
> + /* The RSA signature of the DDP package file provided is invalid */
> + ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -10,
> +
> + /* The DDP package file security revision is too low and not
> + * supported by firmware
> + */
> + ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW = -11,
> +
> + /* Manifest hash mismatch */
> + ICE_DDP_PKG_MANIFEST_INVALID = -12,
> +
> + /* Buffer hash mismatches manifest */
> + ICE_DDP_PKG_BUFFER_INVALID = -13,
> +
> + /* Other errors */
> + ICE_DDP_PKG_ERR = -14,
> +};
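
Note that only a few of these states leave the device with a usable package;
a sketch of the success check is below (this mirrors what
ice_is_init_pkg_successful(), declared later in this header, is presumably
reduced to, so treat the exact list as an assumption):

static inline bool example_ddp_state_is_ok(enum ice_ddp_state state)
{
	switch (state) {
	case ICE_DDP_PKG_SUCCESS:
	case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
	case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
		return true;	/* device ended up with a usable package */
	default:
		return false;
	}
}
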
> +
> +/* Package and segment headers and tables */
> +struct ice_pkg_hdr {
> + struct ice_pkg_ver pkg_format_ver;
> + __le32 seg_count;
> + __le32 seg_offset[STRUCT_HACK_VAR_LEN];
> +};
> +
> +/* Package signing algorithm types */
> +#define SEGMENT_SIGN_TYPE_INVALID 0x00000000
> +#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001
> +#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002
> +#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 /* Secure Boot Block */
> +
> +/* generic segment */
> +struct ice_generic_seg_hdr {
> +#define SEGMENT_TYPE_INVALID 0x00000000
> +#define SEGMENT_TYPE_METADATA 0x00000001
> +#define SEGMENT_TYPE_ICE_E810 0x00000010
> +#define SEGMENT_TYPE_SIGNING 0x00001001
> +#define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020
> + __le32 seg_type;
> + struct ice_pkg_ver seg_format_ver;
> + __le32 seg_size;
> + char seg_id[ICE_PKG_NAME_SIZE];
> +};
> +
> +/* ice specific segment */
> +
> +union ice_device_id {
> + struct {
> + __le16 device_id;
> + __le16 vendor_id;
> + } dev_vend_id;
> + __le32 id;
> +};
> +
> +struct ice_device_id_entry {
> + union ice_device_id device;
> + union ice_device_id sub_device;
> +};
> +
> +struct ice_seg {
> + struct ice_generic_seg_hdr hdr;
> + __le32 device_table_count;
> + struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN];
> +};
> +
> +struct ice_nvm_table {
> + __le32 table_count;
> + __le32 vers[STRUCT_HACK_VAR_LEN];
> +};
> +
> +struct ice_buf {
> +#define ICE_PKG_BUF_SIZE 4096
> + u8 buf[ICE_PKG_BUF_SIZE];
> +};
> +
> +struct ice_buf_table {
> + __le32 buf_count;
> + struct ice_buf buf_array[STRUCT_HACK_VAR_LEN];
> +};
> +
> +struct ice_run_time_cfg_seg {
> + struct ice_generic_seg_hdr hdr;
> + u8 rsvd[8];
> + struct ice_buf_table buf_table;
> +};
> +
> +/* global metadata specific segment */
> +struct ice_global_metadata_seg {
> + struct ice_generic_seg_hdr hdr;
> + struct ice_pkg_ver pkg_ver;
> + __le32 rsvd;
> + char pkg_name[ICE_PKG_NAME_SIZE];
> +};
> +
> +#define ICE_MIN_S_OFF 12
> +#define ICE_MAX_S_OFF 4095
> +#define ICE_MIN_S_SZ 1
> +#define ICE_MAX_S_SZ 4084
> +
> +struct ice_sign_seg {
> + struct ice_generic_seg_hdr hdr;
> + __le32 seg_id;
> + __le32 sign_type;
> + __le32 signed_seg_idx;
> + __le32 signed_buf_start;
> + __le32 signed_buf_count;
> +#define ICE_SIGN_SEG_RESERVED_COUNT 44
> + u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT];
> + struct ice_buf_table buf_tbl;
> +};
> +
> +/* section information */
> +struct ice_section_entry {
> + __le32 type;
> + __le16 offset;
> + __le16 size;
> +};
> +
> +#define ICE_MIN_S_COUNT 1
> +#define ICE_MAX_S_COUNT 511
> +#define ICE_MIN_S_DATA_END 12
> +#define ICE_MAX_S_DATA_END 4096
> +
> +#define ICE_METADATA_BUF 0x80000000
> +
> +struct ice_buf_hdr {
> + __le16 section_count;
> + __le16 data_end;
> + struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN];
> +};
> +
> +#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
> + ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
> + (ent_sz))
> +
> +/* ice package section IDs */
> +#define ICE_SID_METADATA 1
> +#define ICE_SID_XLT0_SW 10
> +#define ICE_SID_XLT_KEY_BUILDER_SW 11
> +#define ICE_SID_XLT1_SW 12
> +#define ICE_SID_XLT2_SW 13
> +#define ICE_SID_PROFID_TCAM_SW 14
> +#define ICE_SID_PROFID_REDIR_SW 15
> +#define ICE_SID_FLD_VEC_SW 16
> +#define ICE_SID_CDID_KEY_BUILDER_SW 17
> +#define ICE_SID_CDID_REDIR_SW 18
> +
> +#define ICE_SID_XLT0_ACL 20
> +#define ICE_SID_XLT_KEY_BUILDER_ACL 21
> +#define ICE_SID_XLT1_ACL 22
> +#define ICE_SID_XLT2_ACL 23
> +#define ICE_SID_PROFID_TCAM_ACL 24
> +#define ICE_SID_PROFID_REDIR_ACL 25
> +#define ICE_SID_FLD_VEC_ACL 26
> +#define ICE_SID_CDID_KEY_BUILDER_ACL 27
> +#define ICE_SID_CDID_REDIR_ACL 28
> +
> +#define ICE_SID_XLT0_FD 30
> +#define ICE_SID_XLT_KEY_BUILDER_FD 31
> +#define ICE_SID_XLT1_FD 32
> +#define ICE_SID_XLT2_FD 33
> +#define ICE_SID_PROFID_TCAM_FD 34
> +#define ICE_SID_PROFID_REDIR_FD 35
> +#define ICE_SID_FLD_VEC_FD 36
> +#define ICE_SID_CDID_KEY_BUILDER_FD 37
> +#define ICE_SID_CDID_REDIR_FD 38
> +
> +#define ICE_SID_XLT0_RSS 40
> +#define ICE_SID_XLT_KEY_BUILDER_RSS 41
> +#define ICE_SID_XLT1_RSS 42
> +#define ICE_SID_XLT2_RSS 43
> +#define ICE_SID_PROFID_TCAM_RSS 44
> +#define ICE_SID_PROFID_REDIR_RSS 45
> +#define ICE_SID_FLD_VEC_RSS 46
> +#define ICE_SID_CDID_KEY_BUILDER_RSS 47
> +#define ICE_SID_CDID_REDIR_RSS 48
> +
> +#define ICE_SID_RXPARSER_CAM 50
> +#define ICE_SID_RXPARSER_NOMATCH_CAM 51
> +#define ICE_SID_RXPARSER_IMEM 52
> +#define ICE_SID_RXPARSER_XLT0_BUILDER 53
> +#define ICE_SID_RXPARSER_NODE_PTYPE 54
> +#define ICE_SID_RXPARSER_MARKER_PTYPE 55
> +#define ICE_SID_RXPARSER_BOOST_TCAM 56
> +#define ICE_SID_RXPARSER_PROTO_GRP 57
> +#define ICE_SID_RXPARSER_METADATA_INIT 58
> +#define ICE_SID_RXPARSER_XLT0 59
> +
> +#define ICE_SID_TXPARSER_CAM 60
> +#define ICE_SID_TXPARSER_NOMATCH_CAM 61
> +#define ICE_SID_TXPARSER_IMEM 62
> +#define ICE_SID_TXPARSER_XLT0_BUILDER 63
> +#define ICE_SID_TXPARSER_NODE_PTYPE 64
> +#define ICE_SID_TXPARSER_MARKER_PTYPE 65
> +#define ICE_SID_TXPARSER_BOOST_TCAM 66
> +#define ICE_SID_TXPARSER_PROTO_GRP 67
> +#define ICE_SID_TXPARSER_METADATA_INIT 68
> +#define ICE_SID_TXPARSER_XLT0 69
> +
> +#define ICE_SID_RXPARSER_INIT_REDIR 70
> +#define ICE_SID_TXPARSER_INIT_REDIR 71
> +#define ICE_SID_RXPARSER_MARKER_GRP 72
> +#define ICE_SID_TXPARSER_MARKER_GRP 73
> +#define ICE_SID_RXPARSER_LAST_PROTO 74
> +#define ICE_SID_TXPARSER_LAST_PROTO 75
> +#define ICE_SID_RXPARSER_PG_SPILL 76
> +#define ICE_SID_TXPARSER_PG_SPILL 77
> +#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
> +#define ICE_SID_TXPARSER_NOMATCH_SPILL 79
> +
> +#define ICE_SID_XLT0_PE 80
> +#define ICE_SID_XLT_KEY_BUILDER_PE 81
> +#define ICE_SID_XLT1_PE 82
> +#define ICE_SID_XLT2_PE 83
> +#define ICE_SID_PROFID_TCAM_PE 84
> +#define ICE_SID_PROFID_REDIR_PE 85
> +#define ICE_SID_FLD_VEC_PE 86
> +#define ICE_SID_CDID_KEY_BUILDER_PE 87
> +#define ICE_SID_CDID_REDIR_PE 88
> +
> +#define ICE_SID_RXPARSER_FLAG_REDIR 97
> +
> +/* Label Metadata section IDs */
> +#define ICE_SID_LBL_FIRST 0x80000010
> +#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010
> +#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011
> +#define ICE_SID_LBL_RESERVED_12 0x80000012
> +#define ICE_SID_LBL_RESERVED_13 0x80000013
> +#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014
> +#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015
> +#define ICE_SID_LBL_PTYPE 0x80000016
> +#define ICE_SID_LBL_PROTOCOL_ID 0x80000017
> +#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
> +#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019
> +#define ICE_SID_LBL_RXPARSER_PG 0x8000001A
> +#define ICE_SID_LBL_TXPARSER_PG 0x8000001B
> +#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C
> +#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D
> +#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E
> +#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F
> +#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020
> +#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021
> +#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022
> +#define ICE_SID_LBL_FLAG 0x80000023
> +#define ICE_SID_LBL_REG 0x80000024
> +#define ICE_SID_LBL_SW_PTG 0x80000025
> +#define ICE_SID_LBL_ACL_PTG 0x80000026
> +#define ICE_SID_LBL_PE_PTG 0x80000027
> +#define ICE_SID_LBL_RSS_PTG 0x80000028
> +#define ICE_SID_LBL_FD_PTG 0x80000029
> +#define ICE_SID_LBL_SW_VSIG 0x8000002A
> +#define ICE_SID_LBL_ACL_VSIG 0x8000002B
> +#define ICE_SID_LBL_PE_VSIG 0x8000002C
> +#define ICE_SID_LBL_RSS_VSIG 0x8000002D
> +#define ICE_SID_LBL_FD_VSIG 0x8000002E
> +#define ICE_SID_LBL_PTYPE_META 0x8000002F
> +#define ICE_SID_LBL_SW_PROFID 0x80000030
> +#define ICE_SID_LBL_ACL_PROFID 0x80000031
> +#define ICE_SID_LBL_PE_PROFID 0x80000032
> +#define ICE_SID_LBL_RSS_PROFID 0x80000033
> +#define ICE_SID_LBL_FD_PROFID 0x80000034
> +#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035
> +#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036
> +#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037
> +#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038
> +/* The following define MUST be updated to reflect the last label section ID */
> +#define ICE_SID_LBL_LAST 0x80000038
> +
> +/* Label ICE runtime configuration section IDs */
> +#define ICE_SID_TX_5_LAYER_TOPO 0x10
> +
> +enum ice_block {
> + ICE_BLK_SW = 0,
> + ICE_BLK_ACL,
> + ICE_BLK_FD,
> + ICE_BLK_RSS,
> + ICE_BLK_PE,
> + ICE_BLK_COUNT
> +};
> +
> +enum ice_sect {
> + ICE_XLT0 = 0,
> + ICE_XLT_KB,
> + ICE_XLT1,
> + ICE_XLT2,
> + ICE_PROF_TCAM,
> + ICE_PROF_REDIR,
> + ICE_VEC_TBL,
> + ICE_CDID_KB,
> + ICE_CDID_REDIR,
> + ICE_SECT_COUNT
> +};
> +
> +/* package buffer building */
> +
> +struct ice_buf_build {
> + struct ice_buf buf;
> + u16 reserved_section_table_entries;
> +};
> +
> +struct ice_pkg_enum {
> + struct ice_buf_table *buf_table;
> + u32 buf_idx;
> +
> + u32 type;
> + struct ice_buf_hdr *buf;
> + u32 sect_idx;
> + void *sect;
> + u32 sect_type;
> +
> + u32 entry_idx;
> + void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
> +};
> +
> +/* package Marker PType TCAM entry */
> +struct ice_marker_ptype_tcam_entry {
> +#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
> + __le16 addr;
> + __le16 ptype;
> + u8 keys[20];
> +};
> +
> +struct ice_marker_ptype_tcam_section {
> + __le16 count;
> + __le16 reserved;
> + struct ice_marker_ptype_tcam_entry tcam[STRUCT_HACK_VAR_LEN];
> +};
> +
> +#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
> + ice_struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \
> + sizeof(struct ice_marker_ptype_tcam_entry), \
> + sizeof(struct ice_marker_ptype_tcam_entry))
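
To make the capacity arithmetic concrete: ice_buf_hdr with one section_entry
is 4 + 8 = 12 bytes, the marker PType section header (count + reserved) is
4 bytes, and each TCAM entry is 2 + 2 + 20 = 24 bytes, so one 4096-byte
buffer holds at most (4096 - 12 - 4) / 24 = 170 entries. A compile-time check
of that expectation, assuming no struct padding and a C11 toolchain (this
assert is not part of the patch):

_Static_assert(ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF == 170,
	       "unexpected per-buffer marker PType TCAM capacity");
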
> +
> +struct ice_hw;
> +
> +enum ice_status
> +ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
> +void ice_release_change_lock(struct ice_hw *hw);
> +
> +struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
> +void *
> +ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
> +enum ice_status
> +ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
> +enum ice_status
> +ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
> + ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
> +u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
> +
> +enum ice_status
> +ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
> +enum ice_status
> +ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
> +void ice_release_global_cfg_lock(struct ice_hw *hw);
> +struct ice_generic_seg_hdr *
> +ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
> + struct ice_pkg_hdr *pkg_hdr);
> +enum ice_ddp_state
> +ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len);
> +enum ice_ddp_state
> +ice_get_pkg_info(struct ice_hw *hw);
> +void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg);
> +struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg);
> +enum ice_status
> +ice_acquire_global_cfg_lock(struct ice_hw *hw,
> + enum ice_aq_res_access_type access);
> +
> +struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg);
> +struct ice_buf_hdr *
> +ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state);
> +bool
> +ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state);
> +void *
> +ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
> + u32 sect_type, u32 *offset,
> + void *(*handler)(u32 sect_type, void *section,
> + u32 index, u32 *offset));
> +void *
> +ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
> + u32 sect_type);
> +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
> +enum ice_ddp_state
> +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
> +bool ice_is_init_pkg_successful(enum ice_ddp_state state);
> +void ice_free_seg(struct ice_hw *hw);
> +
> +struct ice_buf_build *
> +ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
> + void **section);
> +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
> +void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
> +
> +enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
> +
> +#endif /* _ICE_DDP_H_ */
> diff --git a/drivers/net/ice/base/ice_defs.h b/drivers/net/ice/base/ice_defs.h
> new file mode 100644
> index 0000000000..6e886f6aac
> --- /dev/null
> +++ b/drivers/net/ice/base/ice_defs.h
> @@ -0,0 +1,49 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2001-2022 Intel Corporation
> + */
> +
> +#ifndef _ICE_DEFS_H_
> +#define _ICE_DEFS_H_
> +
> +#define ETH_ALEN 6
> +
> +#define ETH_HEADER_LEN 14
> +
> +#define BIT(a) (1UL << (a))
> +#define BIT_ULL(a) (1ULL << (a))
> +
> +#define BITS_PER_BYTE 8
> +
> +#define _FORCE_
> +
> +#define ICE_BYTES_PER_WORD 2
> +#define ICE_BYTES_PER_DWORD 4
> +#define ICE_MAX_TRAFFIC_CLASS 8
> +
> +/**
> + * ROUND_UP - round up to next arbitrary multiple (not a power of 2)
> + * @a: value to round up
> + * @b: arbitrary multiple
> + *
> + * Round up to the next multiple of the arbitrary b.
> + * Note, when b is a power of 2 use ICE_ALIGN() instead.
> + */
> +#define ROUND_UP(a, b) ((b) * DIVIDE_AND_ROUND_UP((a), (b)))
> +
> +#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b))
> +
> +#define IS_ASCII(_ch) ((_ch) < 0x80)
> +
> +#define STRUCT_HACK_VAR_LEN
> +/**
> + * ice_struct_size - size of struct with C99 flexible array member
> + * @ptr: pointer to structure
> + * @field: flexible array member (last member of the structure)
> + * @num: number of elements of that flexible array member
> + */
> +#define ice_struct_size(ptr, field, num) \
> + (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
> +
> +#define FLEX_ARRAY_SIZE(_ptr, _mem, cnt) ((cnt) * sizeof(_ptr->_mem[0]))
> +
> +#endif /* _ICE_DEFS_H_ */
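
As a small illustration of ice_struct_size() with a flexible array member
(ice_label_section is the layout used elsewhere in the base code; the helper
below is only a sketch, not part of the patch):

static struct ice_label_section *
example_alloc_label_section(struct ice_hw *hw, u16 n)
{
	struct ice_label_section *sect;

	/* header plus 'n' trailing ice_label entries in one allocation */
	sect = (struct ice_label_section *)
		ice_malloc(hw, ice_struct_size(sect, label, n));
	if (sect)
		sect->count = CPU_TO_LE16(n);

	return sect;	/* caller releases with ice_free(hw, sect) */
}
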
> diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
> index 3918169001..a43d7ef76b 100644
> --- a/drivers/net/ice/base/ice_flex_pipe.c
> +++ b/drivers/net/ice/base/ice_flex_pipe.c
> @@ -3,6 +3,7 @@
> */
>
> #include "ice_common.h"
> +#include "ice_ddp.h"
> #include "ice_flex_pipe.h"
> #include "ice_protocol_type.h"
> #include "ice_flow.h"
> @@ -106,2049 +107,224 @@ static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
> }
>
> /**
> - * ice_pkg_val_buf
> - * @buf: pointer to the ice buffer
> - *
> - * This helper function validates a buffer's header.
> - */
> -static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
> -{
> - struct ice_buf_hdr *hdr;
> - u16 section_count;
> - u16 data_end;
> -
> - hdr = (struct ice_buf_hdr *)buf->buf;
> - /* verify data */
> - section_count = LE16_TO_CPU(hdr->section_count);
> - if (section_count < ICE_MIN_S_COUNT || section_count >
> ICE_MAX_S_COUNT)
> - return NULL;
> -
> - data_end = LE16_TO_CPU(hdr->data_end);
> - if (data_end < ICE_MIN_S_DATA_END || data_end >
> ICE_MAX_S_DATA_END)
> - return NULL;
> -
> - return hdr;
> -}
> -
> -/**
> - * ice_find_buf_table
> - * @ice_seg: pointer to the ice segment
> - *
> - * Returns the address of the buffer table within the ice segment.
> - */
> -static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
> -{
> - struct ice_nvm_table *nvms;
> -
> - nvms = (struct ice_nvm_table *)
> - (ice_seg->device_table +
> - LE32_TO_CPU(ice_seg->device_table_count));
> -
> - return (_FORCE_ struct ice_buf_table *)
> - (nvms->vers + LE32_TO_CPU(nvms->table_count));
> -}
> -
> -/**
> - * ice_pkg_enum_buf
> - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
> - * @state: pointer to the enum state
> - *
> - * This function will enumerate all the buffers in the ice segment. The first
> - * call is made with the ice_seg parameter non-NULL; on subsequent calls,
> - * ice_seg is set to NULL which continues the enumeration. When the
> function
> - * returns a NULL pointer, then the end of the buffers has been reached, or
> an
> - * unexpected value has been detected (for example an invalid section
> count or
> - * an invalid buffer end value).
> - */
> -static struct ice_buf_hdr *
> -ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
> -{
> - if (ice_seg) {
> - state->buf_table = ice_find_buf_table(ice_seg);
> - if (!state->buf_table)
> - return NULL;
> -
> - state->buf_idx = 0;
> - return ice_pkg_val_buf(state->buf_table->buf_array);
> - }
> -
> - if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
> - return ice_pkg_val_buf(state->buf_table->buf_array +
> - state->buf_idx);
> - else
> - return NULL;
> -}
> -
> -/**
> - * ice_pkg_advance_sect
> - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
> - * @state: pointer to the enum state
> - *
> - * This helper function will advance the section within the ice segment,
> - * also advancing the buffer if needed.
> - */
> -static bool
> -ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
> -{
> - if (!ice_seg && !state->buf)
> - return false;
> -
> - if (!ice_seg && state->buf)
> - if (++state->sect_idx < LE16_TO_CPU(state->buf-
> >section_count))
> - return true;
> -
> - state->buf = ice_pkg_enum_buf(ice_seg, state);
> - if (!state->buf)
> - return false;
> -
> - /* start of new buffer, reset section index */
> - state->sect_idx = 0;
> - return true;
> -}
> -
> -/**
> - * ice_pkg_enum_section
> - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
> - * @state: pointer to the enum state
> - * @sect_type: section type to enumerate
> - *
> - * This function will enumerate all the sections of a particular type in the
> - * ice segment. The first call is made with the ice_seg parameter non-NULL;
> - * on subsequent calls, ice_seg is set to NULL which continues the
> enumeration.
> - * When the function returns a NULL pointer, then the end of the matching
> - * sections has been reached.
> - */
> -void *
> -ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
> - u32 sect_type)
> -{
> - u16 offset, size;
> -
> - if (ice_seg)
> - state->type = sect_type;
> -
> - if (!ice_pkg_advance_sect(ice_seg, state))
> - return NULL;
> -
> - /* scan for next matching section */
> - while (state->buf->section_entry[state->sect_idx].type !=
> - CPU_TO_LE32(state->type))
> - if (!ice_pkg_advance_sect(NULL, state))
> - return NULL;
> -
> - /* validate section */
> - offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
> - if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
> - return NULL;
> -
> - size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
> - if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
> - return NULL;
> -
> - /* make sure the section fits in the buffer */
> - if (offset + size > ICE_PKG_BUF_SIZE)
> - return NULL;
> -
> - state->sect_type =
> - LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
> -
> - /* calc pointer to this section */
> - state->sect = ((u8 *)state->buf) +
> - LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
> -
> - return state->sect;
> -}
> -
> -/**
> - * ice_pkg_enum_entry
> - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
> - * @state: pointer to the enum state
> - * @sect_type: section type to enumerate
> - * @offset: pointer to variable that receives the offset in the table
> (optional)
> - * @handler: function that handles access to the entries into the section
> type
> - *
> - * This function will enumerate all the entries in particular section type in
> - * the ice segment. The first call is made with the ice_seg parameter non-
> NULL;
> - * on subsequent calls, ice_seg is set to NULL which continues the
> enumeration.
> - * When the function returns a NULL pointer, then the end of the entries
> has
> - * been reached.
> - *
> - * Since each section may have a different header and entry size, the
> handler
> - * function is needed to determine the number and location entries in
> each
> - * section.
> - *
> - * The offset parameter is optional, but should be used for sections that
> - * contain an offset for each section table. For such cases, the section
> handler
> - * function must return the appropriate offset + index to give the
> absolution
> - * offset for each entry. For example, if the base for a section's header
> - * indicates a base offset of 10, and the index for the entry is 2, then
> - * section handler function should set the offset to 10 + 2 = 12.
> - */
> -void *
> -ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
> - u32 sect_type, u32 *offset,
> - void *(*handler)(u32 sect_type, void *section,
> - u32 index, u32 *offset))
> -{
> - void *entry;
> -
> - if (ice_seg) {
> - if (!handler)
> - return NULL;
> -
> - if (!ice_pkg_enum_section(ice_seg, state, sect_type))
> - return NULL;
> -
> - state->entry_idx = 0;
> - state->handler = handler;
> - } else {
> - state->entry_idx++;
> - }
> -
> - if (!state->handler)
> - return NULL;
> -
> - /* get entry */
> - entry = state->handler(state->sect_type, state->sect, state->entry_idx,
> - offset);
> - if (!entry) {
> - /* end of a section, look for another section of this type */
> - if (!ice_pkg_enum_section(NULL, state, 0))
> - return NULL;
> -
> - state->entry_idx = 0;
> - entry = state->handler(state->sect_type, state->sect,
> - state->entry_idx, offset);
> - }
> -
> - return entry;
> -}
> -
> -/**
> - * ice_hw_ptype_ena - check if the PTYPE is enabled or not
> - * @hw: pointer to the HW structure
> - * @ptype: the hardware PTYPE
> - */
> -bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
> -{
> - return ptype < ICE_FLOW_PTYPE_MAX &&
> - ice_is_bit_set(hw->hw_ptype, ptype);
> -}
> -
> -/**
> - * ice_marker_ptype_tcam_handler
> - * @sect_type: section type
> - * @section: pointer to section
> - * @index: index of the Marker PType TCAM entry to be returned
> - * @offset: pointer to receive absolute offset, always 0 for ptype TCAM
> sections
> - *
> - * This is a callback function that can be passed to ice_pkg_enum_entry.
> - * Handles enumeration of individual Marker PType TCAM entries.
> - */
> -static void *
> -ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index,
> - u32 *offset)
> -{
> - struct ice_marker_ptype_tcam_section *marker_ptype;
> -
> - if (!section)
> - return NULL;
> -
> - if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
> - return NULL;
> -
> - if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
> - return NULL;
> -
> - if (offset)
> - *offset = 0;
> -
> - marker_ptype = (struct ice_marker_ptype_tcam_section *)section;
> - if (index >= LE16_TO_CPU(marker_ptype->count))
> - return NULL;
> -
> - return marker_ptype->tcam + index;
> -}
> -
> -/**
> - * ice_fill_hw_ptype - fill the enabled PTYPE bit information
> - * @hw: pointer to the HW structure
> - */
> -static void
> -ice_fill_hw_ptype(struct ice_hw *hw)
> -{
> - struct ice_marker_ptype_tcam_entry *tcam;
> - struct ice_seg *seg = hw->seg;
> - struct ice_pkg_enum state;
> -
> - ice_zero_bitmap(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
> - if (!seg)
> - return;
> -
> - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> -
> - do {
> - tcam = (struct ice_marker_ptype_tcam_entry *)
> - ice_pkg_enum_entry(seg, &state,
> - ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
> - ice_marker_ptype_tcam_handler);
> - if (tcam &&
> - LE16_TO_CPU(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
> - LE16_TO_CPU(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
> - ice_set_bit(LE16_TO_CPU(tcam->ptype), hw->hw_ptype);
> -
> - seg = NULL;
> - } while (tcam);
> -}
> -
> -/**
> - * ice_boost_tcam_handler
> - * @sect_type: section type
> - * @section: pointer to section
> - * @index: index of the boost TCAM entry to be returned
> - * @offset: pointer to receive absolute offset, always 0 for boost TCAM
> sections
> - *
> - * This is a callback function that can be passed to ice_pkg_enum_entry.
> - * Handles enumeration of individual boost TCAM entries.
> - */
> -static void *
> -ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32
> *offset)
> -{
> - struct ice_boost_tcam_section *boost;
> -
> - if (!section)
> - return NULL;
> -
> - if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
> - return NULL;
> -
> - if (index > ICE_MAX_BST_TCAMS_IN_BUF)
> - return NULL;
> -
> - if (offset)
> - *offset = 0;
> -
> - boost = (struct ice_boost_tcam_section *)section;
> - if (index >= LE16_TO_CPU(boost->count))
> - return NULL;
> -
> - return boost->tcam + index;
> -}
> -
> -/**
> - * ice_find_boost_entry
> - * @ice_seg: pointer to the ice segment (non-NULL)
> - * @addr: Boost TCAM address of entry to search for
> - * @entry: returns pointer to the entry
> - *
> - * Finds a particular Boost TCAM entry and returns a pointer to that entry
> - * if it is found. The ice_seg parameter must not be NULL since the first call
> - * to ice_pkg_enum_entry requires a pointer to an actual ice_segment
> structure.
> - */
> -static enum ice_status
> -ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
> - struct ice_boost_tcam_entry **entry)
> -{
> - struct ice_boost_tcam_entry *tcam;
> - struct ice_pkg_enum state;
> -
> - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> -
> - if (!ice_seg)
> - return ICE_ERR_PARAM;
> -
> - do {
> - tcam = (struct ice_boost_tcam_entry *)
> - ice_pkg_enum_entry(ice_seg, &state,
> - ICE_SID_RXPARSER_BOOST_TCAM, NULL,
> - ice_boost_tcam_handler);
> - if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
> - *entry = tcam;
> - return ICE_SUCCESS;
> - }
> -
> - ice_seg = NULL;
> - } while (tcam);
> -
> - *entry = NULL;
> - return ICE_ERR_CFG;
> -}
> -
> -/**
> - * ice_label_enum_handler
> - * @sect_type: section type
> - * @section: pointer to section
> - * @index: index of the label entry to be returned
> - * @offset: pointer to receive absolute offset, always zero for label sections
> - *
> - * This is a callback function that can be passed to ice_pkg_enum_entry.
> - * Handles enumeration of individual label entries.
> - */
> -static void *
> -ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section,
> u32 index,
> - u32 *offset)
> -{
> - struct ice_label_section *labels;
> -
> - if (!section)
> - return NULL;
> -
> - if (index > ICE_MAX_LABELS_IN_BUF)
> - return NULL;
> -
> - if (offset)
> - *offset = 0;
> -
> - labels = (struct ice_label_section *)section;
> - if (index >= LE16_TO_CPU(labels->count))
> - return NULL;
> -
> - return labels->label + index;
> -}
> -
> -/**
> - * ice_enum_labels
> - * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
> - * @type: the section type that will contain the label (0 on subsequent
> calls)
> - * @state: ice_pkg_enum structure that will hold the state of the
> enumeration
> - * @value: pointer to a value that will return the label's value if found
> - *
> - * Enumerates a list of labels in the package. The caller will call
> - * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
> - * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a
> NULL
> - * the end of the list has been reached.
> - */
> -static char *
> -ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum
> *state,
> - u16 *value)
> -{
> - struct ice_label *label;
> -
> - /* Check for valid label section on first call */
> - if (type && !(type >= ICE_SID_LBL_FIRST && type <=
> ICE_SID_LBL_LAST))
> - return NULL;
> -
> - label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
> - NULL,
> - ice_label_enum_handler);
> - if (!label)
> - return NULL;
> -
> - *value = LE16_TO_CPU(label->value);
> - return label->name;
> -}
> -
> -/**
> - * ice_add_tunnel_hint
> - * @hw: pointer to the HW structure
> - * @label_name: label text
> - * @val: value of the tunnel port boost entry
> - */
> -static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16
> val)
> -{
> - if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
> - u16 i;
> -
> - for (i = 0; tnls[i].type != TNL_LAST; i++) {
> - size_t len = strlen(tnls[i].label_prefix);
> -
> - /* Look for matching label start, before continuing
> */
> - if (strncmp(label_name, tnls[i].label_prefix, len))
> - continue;
> -
> - /* Make sure this label matches our PF. Note that
> the PF
> - * character ('0' - '7') will be located where our
> - * prefix string's null terminator is located.
> - */
> - if ((label_name[len] - '0') == hw->pf_id) {
> - hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
> - hw->tnl.tbl[hw->tnl.count].valid = false;
> - hw->tnl.tbl[hw->tnl.count].in_use = false;
> - hw->tnl.tbl[hw->tnl.count].marked = false;
> - hw->tnl.tbl[hw->tnl.count].boost_addr = val;
> - hw->tnl.tbl[hw->tnl.count].port = 0;
> - hw->tnl.count++;
> - break;
> - }
> - }
> - }
> -}
> -
> -/**
> - * ice_add_dvm_hint
> - * @hw: pointer to the HW structure
> - * @val: value of the boost entry
> - * @enable: true if entry needs to be enabled, or false if needs to be
> disabled
> - */
> -static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
> -{
> - if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
> - hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
> - hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
> - hw->dvm_upd.count++;
> - }
> -}
> -
> -/**
> - * ice_init_pkg_hints
> - * @hw: pointer to the HW structure
> - * @ice_seg: pointer to the segment of the package scan (non-NULL)
> - *
> - * This function will scan the package and save off relevant information
> - * (hints or metadata) for driver use. The ice_seg parameter must not be
> NULL
> - * since the first call to ice_enum_labels requires a pointer to an actual
> - * ice_seg structure.
> - */
> -static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
> -{
> - struct ice_pkg_enum state;
> - char *label_name;
> - u16 val;
> - int i;
> -
> - ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
> - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> -
> - if (!ice_seg)
> - return;
> -
> - label_name = ice_enum_labels(ice_seg,
> ICE_SID_LBL_RXPARSER_TMEM, &state,
> - &val);
> -
> - while (label_name) {
> - if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
> - /* check for a tunnel entry */
> - ice_add_tunnel_hint(hw, label_name, val);
> -
> - /* check for a dvm mode entry */
> - else if (!strncmp(label_name, ICE_DVM_PRE,
> strlen(ICE_DVM_PRE)))
> - ice_add_dvm_hint(hw, val, true);
> -
> - /* check for a svm mode entry */
> - else if (!strncmp(label_name, ICE_SVM_PRE,
> strlen(ICE_SVM_PRE)))
> - ice_add_dvm_hint(hw, val, false);
> -
> - label_name = ice_enum_labels(NULL, 0, &state, &val);
> - }
> -
> - /* Cache the appropriate boost TCAM entry pointers for tunnels */
> - for (i = 0; i < hw->tnl.count; i++) {
> - ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
> - &hw->tnl.tbl[i].boost_entry);
> - if (hw->tnl.tbl[i].boost_entry)
> - hw->tnl.tbl[i].valid = true;
> - }
> -
> - /* Cache the appropriate boost TCAM entry pointers for DVM and
> SVM */
> - for (i = 0; i < hw->dvm_upd.count; i++)
> - ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
> - &hw->dvm_upd.tbl[i].boost_entry);
> -}
> -
> -/* Key creation */
> -
> -#define ICE_DC_KEY 0x1 /* don't care */
> -#define ICE_DC_KEYINV 0x1
> -#define ICE_NM_KEY 0x0 /* never match */
> -#define ICE_NM_KEYINV 0x0
> -#define ICE_0_KEY 0x1 /* match 0 */
> -#define ICE_0_KEYINV 0x0
> -#define ICE_1_KEY 0x0 /* match 1 */
> -#define ICE_1_KEYINV 0x1
> -
> -/**
> - * ice_gen_key_word - generate 16-bits of a key/mask word
> - * @val: the value
> - * @valid: valid bits mask (change only the valid bits)
> - * @dont_care: don't care mask
> - * @nvr_mtch: never match mask
> - * @key: pointer to an array of where the resulting key portion
> - * @key_inv: pointer to an array of where the resulting key invert portion
> - *
> - * This function generates 16-bits from a 8-bit value, an 8-bit don't care
> mask
> - * and an 8-bit never match mask. The 16-bits of output are divided into 8
> bits
> - * of key and 8 bits of key invert.
> - *
> - * '0' = b01, always match a 0 bit
> - * '1' = b10, always match a 1 bit
> - * '?' = b11, don't care bit (always matches)
> - * '~' = b00, never match bit
> - *
> - * Input:
> - * val: b0 1 0 1 0 1
> - * dont_care: b0 0 1 1 0 0
> - * never_mtch: b0 0 0 0 1 1
> - * ------------------------------
> - * Result: key: b01 10 11 11 00 00
> - */
> -static enum ice_status
> -ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
> - u8 *key_inv)
> -{
> - u8 in_key = *key, in_key_inv = *key_inv;
> - u8 i;
> -
> - /* 'dont_care' and 'nvr_mtch' masks cannot overlap */
> - if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
> - return ICE_ERR_CFG;
> -
> - *key = 0;
> - *key_inv = 0;
> -
> - /* encode the 8 bits into 8-bit key and 8-bit key invert */
> - for (i = 0; i < 8; i++) {
> - *key >>= 1;
> - *key_inv >>= 1;
> -
> - if (!(valid & 0x1)) { /* change only valid bits */
> - *key |= (in_key & 0x1) << 7;
> - *key_inv |= (in_key_inv & 0x1) << 7;
> - } else if (dont_care & 0x1) { /* don't care bit */
> - *key |= ICE_DC_KEY << 7;
> - *key_inv |= ICE_DC_KEYINV << 7;
> - } else if (nvr_mtch & 0x1) { /* never match bit */
> - *key |= ICE_NM_KEY << 7;
> - *key_inv |= ICE_NM_KEYINV << 7;
> - } else if (val & 0x01) { /* exact 1 match */
> - *key |= ICE_1_KEY << 7;
> - *key_inv |= ICE_1_KEYINV << 7;
> - } else { /* exact 0 match */
> - *key |= ICE_0_KEY << 7;
> - *key_inv |= ICE_0_KEYINV << 7;
> - }
> -
> - dont_care >>= 1;
> - nvr_mtch >>= 1;
> - valid >>= 1;
> - val >>= 1;
> - in_key >>= 1;
> - in_key_inv >>= 1;
> - }
> -
> - return ICE_SUCCESS;
> -}
> -
> -/**
> - * ice_bits_max_set - determine if the number of bits set is within a
> maximum
> - * @mask: pointer to the byte array which is the mask
> - * @size: the number of bytes in the mask
> - * @max: the max number of set bits
> - *
> - * This function determines if there are at most 'max' number of bits set in
> an
> - * array. Returns true if the number for bits set is <= max or will return
> false
> - * otherwise.
> - */
> -static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
> -{
> - u16 count = 0;
> - u16 i;
> -
> - /* check each byte */
> - for (i = 0; i < size; i++) {
> - /* if 0, go to next byte */
> - if (!mask[i])
> - continue;
> -
> - /* We know there is at least one set bit in this byte because
> of
> - * the above check; if we already have found 'max' number
> of
> - * bits set, then we can return failure now.
> - */
> - if (count == max)
> - return false;
> -
> - /* count the bits in this byte, checking threshold */
> - count += ice_hweight8(mask[i]);
> - if (count > max)
> - return false;
> - }
> -
> - return true;
> -}
> -
> -/**
> - * ice_set_key - generate a variable sized key with multiples of 16-bits
> - * @key: pointer to where the key will be stored
> - * @size: the size of the complete key in bytes (must be even)
> - * @val: array of 8-bit values that makes up the value portion of the key
> - * @upd: array of 8-bit masks that determine what key portion to update
> - * @dc: array of 8-bit masks that make up the don't care mask
> - * @nm: array of 8-bit masks that make up the never match mask
> - * @off: the offset of the first byte in the key to update
> - * @len: the number of bytes in the key update
> - *
> - * This function generates a key from a value, a don't care mask and a
> never
> - * match mask.
> - * upd, dc, and nm are optional parameters, and can be NULL:
> - * upd == NULL --> upd mask is all 1's (update all bits)
> - * dc == NULL --> dc mask is all 0's (no don't care bits)
> - * nm == NULL --> nm mask is all 0's (no never match bits)
> - */
> -enum ice_status
> -ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
> - u16 len)
> -{
> - u16 half_size;
> - u16 i;
> -
> - /* size must be a multiple of 2 bytes. */
> - if (size % 2)
> - return ICE_ERR_CFG;
> - half_size = size / 2;
> -
> - if (off + len > half_size)
> - return ICE_ERR_CFG;
> -
> - /* Make sure at most one bit is set in the never match mask. Having
> more
> - * than one never match mask bit set will cause HW to consume
> excessive
> - * power otherwise; this is a power management efficiency check.
> - */
> -#define ICE_NVR_MTCH_BITS_MAX 1
> - if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
> - return ICE_ERR_CFG;
> -
> - for (i = 0; i < len; i++)
> - if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
> - dc ? dc[i] : 0, nm ? nm[i] : 0,
> - key + off + i, key + half_size + off + i))
> - return ICE_ERR_CFG;
> -
> - return ICE_SUCCESS;
> -}
> -
> -/**
> - * ice_acquire_global_cfg_lock
> - * @hw: pointer to the HW structure
> - * @access: access type (read or write)
> - *
> - * This function will request ownership of the global config lock for reading
> - * or writing of the package. When attempting to obtain write access, the
> - * caller must check for the following two return values:
> - *
> - * ICE_SUCCESS - Means the caller has acquired the global config lock
> - * and can perform writing of the package.
> - * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
> - * package or has found that no update was necessary; in
> - * this case, the caller can just skip performing any
> - * update of the package.
> - */
> -static enum ice_status
> -ice_acquire_global_cfg_lock(struct ice_hw *hw,
> - enum ice_aq_res_access_type access)
> -{
> - enum ice_status status;
> -
> - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
> -
> - status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
> - ICE_GLOBAL_CFG_LOCK_TIMEOUT);
> -
> - if (status == ICE_ERR_AQ_NO_WORK)
> - ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
> -
> - return status;
> -}
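
For readers following the lock handling: a minimal sketch of the caller pattern
the comment above describes when asking for write access (illustration only,
not part of the patch):

    status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
    if (status == ICE_ERR_AQ_NO_WORK)
        return ICE_SUCCESS;    /* another PF already wrote the package */
    else if (status)
        return status;         /* lock not acquired */
    /* ... download the package buffers ... */
    ice_release_global_cfg_lock(hw);
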
> -
> -/**
> - * ice_release_global_cfg_lock
> - * @hw: pointer to the HW structure
> - *
> - * This function will release the global config lock.
> - */
> -static void ice_release_global_cfg_lock(struct ice_hw *hw)
> -{
> - ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
> -}
> -
> -/**
> - * ice_acquire_change_lock
> - * @hw: pointer to the HW structure
> - * @access: access type (read or write)
> - *
> - * This function will request ownership of the change lock.
> - */
> -enum ice_status
> -ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
> -{
> - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
> -
> - return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
> - ICE_CHANGE_LOCK_TIMEOUT);
> -}
> -
> -/**
> - * ice_release_change_lock
> - * @hw: pointer to the HW structure
> - *
> - * This function will release the change lock using the proper Admin Command.
> - */
> -void ice_release_change_lock(struct ice_hw *hw)
> -{
> - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
> -
> - ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
> -}
> -
> -/**
> - * ice_aq_download_pkg
> - * @hw: pointer to the hardware structure
> - * @pkg_buf: the package buffer to transfer
> - * @buf_size: the size of the package buffer
> - * @last_buf: last buffer indicator
> - * @error_offset: returns error offset
> - * @error_info: returns error information
> - * @cd: pointer to command details structure or NULL
> - *
> - * Download Package (0x0C40)
> - */
> -static enum ice_status
> -ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
> - u16 buf_size, bool last_buf, u32 *error_offset,
> - u32 *error_info, struct ice_sq_cd *cd)
> -{
> - struct ice_aqc_download_pkg *cmd;
> - struct ice_aq_desc desc;
> - enum ice_status status;
> -
> - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
> -
> - if (error_offset)
> - *error_offset = 0;
> - if (error_info)
> - *error_info = 0;
> -
> - cmd = &desc.params.download_pkg;
> - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
> - desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
> -
> - if (last_buf)
> - cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
> -
> - status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
> - if (status == ICE_ERR_AQ_ERROR) {
> - /* Read error from buffer only when the FW returned an error */
> - struct ice_aqc_download_pkg_resp *resp;
> -
> - resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
> - if (error_offset)
> - *error_offset = LE32_TO_CPU(resp->error_offset);
> - if (error_info)
> - *error_info = LE32_TO_CPU(resp->error_info);
> - }
> -
> - return status;
> -}
> -
> -/**
> - * ice_aq_upload_section
> - * @hw: pointer to the hardware structure
> - * @pkg_buf: the package buffer which will receive the section
> - * @buf_size: the size of the package buffer
> - * @cd: pointer to command details structure or NULL
> - *
> - * Upload Section (0x0C41)
> - */
> -enum ice_status
> -ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
> - u16 buf_size, struct ice_sq_cd *cd)
> -{
> - struct ice_aq_desc desc;
> -
> - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
> - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
> - desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
> -
> - return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
> -}
> -
> -/**
> - * ice_aq_update_pkg
> - * @hw: pointer to the hardware structure
> - * @pkg_buf: the package cmd buffer
> - * @buf_size: the size of the package cmd buffer
> - * @last_buf: last buffer indicator
> - * @error_offset: returns error offset
> - * @error_info: returns error information
> - * @cd: pointer to command details structure or NULL
> - *
> - * Update Package (0x0C42)
> - */
> -static enum ice_status
> -ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
> - bool last_buf, u32 *error_offset, u32 *error_info,
> - struct ice_sq_cd *cd)
> -{
> - struct ice_aqc_download_pkg *cmd;
> - struct ice_aq_desc desc;
> - enum ice_status status;
> -
> - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
> -
> - if (error_offset)
> - *error_offset = 0;
> - if (error_info)
> - *error_info = 0;
> -
> - cmd = &desc.params.download_pkg;
> - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
> - desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
> -
> - if (last_buf)
> - cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
> -
> - status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
> - if (status == ICE_ERR_AQ_ERROR) {
> - /* Read error from buffer only when the FW returned an error */
> - struct ice_aqc_download_pkg_resp *resp;
> -
> - resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
> - if (error_offset)
> - *error_offset = LE32_TO_CPU(resp->error_offset);
> - if (error_info)
> - *error_info = LE32_TO_CPU(resp->error_info);
> - }
> -
> - return status;
> -}
> -
> -/**
> - * ice_find_seg_in_pkg
> - * @hw: pointer to the hardware structure
> - * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
> - * @pkg_hdr: pointer to the package header to be searched
> - *
> - * This function searches a package file for a particular segment type. On
> - * success it returns a pointer to the segment header, otherwise it will
> - * return NULL.
> - */
> -static struct ice_generic_seg_hdr *
> -ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
> - struct ice_pkg_hdr *pkg_hdr)
> -{
> - u32 i;
> -
> - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
> - ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
> - pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
> - pkg_hdr->pkg_format_ver.update,
> - pkg_hdr->pkg_format_ver.draft);
> -
> - /* Search all package segments for the requested segment type */
> - for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
> - struct ice_generic_seg_hdr *seg;
> -
> - seg = (struct ice_generic_seg_hdr *)
> - ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
> -
> - if (LE32_TO_CPU(seg->seg_type) == seg_type)
> - return seg;
> - }
> -
> - return NULL;
> -}
> -
> -/**
> - * ice_update_pkg_no_lock
> - * @hw: pointer to the hardware structure
> - * @bufs: pointer to an array of buffers
> - * @count: the number of buffers in the array
> - */
> -static enum ice_status
> -ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
> -{
> - enum ice_status status = ICE_SUCCESS;
> - u32 i;
> -
> - for (i = 0; i < count; i++) {
> - struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
> - bool last = ((i + 1) == count);
> - u32 offset, info;
> -
> - status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
> - last, &offset, &info, NULL);
> -
> - if (status) {
> - ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
> - status, offset, info);
> - break;
> - }
> - }
> -
> - return status;
> -}
> -
> -/**
> - * ice_update_pkg
> - * @hw: pointer to the hardware structure
> - * @bufs: pointer to an array of buffers
> - * @count: the number of buffers in the array
> - *
> - * Obtains change lock and updates package.
> - */
> -enum ice_status
> -ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
> -{
> - enum ice_status status;
> -
> - status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
> - if (status)
> - return status;
> -
> - status = ice_update_pkg_no_lock(hw, bufs, count);
> -
> - ice_release_change_lock(hw);
> -
> - return status;
> -}
> -
> -/**
> - * ice_dwnld_cfg_bufs
> - * @hw: pointer to the hardware structure
> - * @bufs: pointer to an array of buffers
> - * @count: the number of buffers in the array
> - *
> - * Obtains global config lock and downloads the package configuration buffers
> - * to the firmware. Metadata buffers are skipped, and the first metadata buffer
> - * found indicates that the rest of the buffers are all metadata buffers.
> - */
> -static enum ice_status
> -ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
> -{
> - enum ice_status status;
> - struct ice_buf_hdr *bh;
> - u32 offset, info, i;
> -
> - if (!bufs || !count)
> - return ICE_ERR_PARAM;
> -
> - /* If the first buffer's first section has its metadata bit set
> - * then there are no buffers to be downloaded, and the operation is
> - * considered a success.
> - */
> - bh = (struct ice_buf_hdr *)bufs;
> - if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
> - return ICE_SUCCESS;
> -
> - /* reset pkg_dwnld_status in case this function is called in the
> - * reset/rebuild flow
> - */
> - hw->pkg_dwnld_status = ICE_AQ_RC_OK;
> -
> - status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
> - if (status) {
> - if (status == ICE_ERR_AQ_NO_WORK)
> - hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
> - else
> - hw->pkg_dwnld_status = hw->adminq.sq_last_status;
> - return status;
> - }
> -
> - for (i = 0; i < count; i++) {
> - bool last = ((i + 1) == count);
> -
> - if (!last) {
> - /* check next buffer for metadata flag */
> - bh = (struct ice_buf_hdr *)(bufs + i + 1);
> -
> - /* A set metadata flag in the next buffer will signal
> - * that the current buffer will be the last buffer
> - * downloaded
> - */
> - if (LE16_TO_CPU(bh->section_count))
> - if (LE32_TO_CPU(bh->section_entry[0].type) &
> - ICE_METADATA_BUF)
> - last = true;
> - }
> -
> - bh = (struct ice_buf_hdr *)(bufs + i);
> -
> - status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
> - &offset, &info, NULL);
> -
> - /* Save AQ status from download package */
> - hw->pkg_dwnld_status = hw->adminq.sq_last_status;
> - if (status) {
> - ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
> - status, offset, info);
> - break;
> - }
> -
> - if (last)
> - break;
> - }
> -
> - if (!status) {
> - status = ice_set_vlan_mode(hw);
> - if (status)
> - ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
> - status);
> - }
> -
> - ice_release_global_cfg_lock(hw);
> -
> - return status;
> -}
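
A note on the metadata handling above: the loop decides whether the current
buffer is the last one to download by peeking at the next buffer's first
section type. Roughly (a sketch with a hypothetical helper name, not code from
the patch):

    static bool next_buf_is_metadata(struct ice_buf_hdr *next)
    {
        /* a metadata buffer terminates the downloadable portion */
        return LE16_TO_CPU(next->section_count) &&
               (LE32_TO_CPU(next->section_entry[0].type) & ICE_METADATA_BUF);
    }
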
> -
> -/**
> - * ice_aq_get_pkg_info_list
> - * @hw: pointer to the hardware structure
> - * @pkg_info: the buffer which will receive the information list
> - * @buf_size: the size of the pkg_info information buffer
> - * @cd: pointer to command details structure or NULL
> - *
> - * Get Package Info List (0x0C43)
> - */
> -static enum ice_status
> -ice_aq_get_pkg_info_list(struct ice_hw *hw,
> - struct ice_aqc_get_pkg_info_resp *pkg_info,
> - u16 buf_size, struct ice_sq_cd *cd)
> -{
> - struct ice_aq_desc desc;
> -
> - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
> - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
> -
> - return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
> -}
> -
> -/**
> - * ice_download_pkg
> - * @hw: pointer to the hardware structure
> - * @ice_seg: pointer to the segment of the package to be downloaded
> - *
> - * Handles the download of a complete package.
> - */
> -static enum ice_status
> -ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
> -{
> - struct ice_buf_table *ice_buf_tbl;
> - enum ice_status status;
> -
> - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
> - ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
> - ice_seg->hdr.seg_format_ver.major,
> - ice_seg->hdr.seg_format_ver.minor,
> - ice_seg->hdr.seg_format_ver.update,
> - ice_seg->hdr.seg_format_ver.draft);
> -
> - ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
> - LE32_TO_CPU(ice_seg->hdr.seg_type),
> - LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
> -
> - ice_buf_tbl = ice_find_buf_table(ice_seg);
> -
> - ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
> - LE32_TO_CPU(ice_buf_tbl->buf_count));
> -
> - status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
> - LE32_TO_CPU(ice_buf_tbl->buf_count));
> -
> - ice_post_pkg_dwnld_vlan_mode_cfg(hw);
> -
> - return status;
> -}
> -
> -/**
> - * ice_init_pkg_info
> - * @hw: pointer to the hardware structure
> - * @pkg_hdr: pointer to the driver's package hdr
> - *
> - * Saves off the package details into the HW structure.
> - */
> -static enum ice_status
> -ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
> -{
> - struct ice_generic_seg_hdr *seg_hdr;
> -
> - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
> - if (!pkg_hdr)
> - return ICE_ERR_PARAM;
> -
> - hw->pkg_seg_id = SEGMENT_TYPE_ICE_E810;
> -
> - ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
> - hw->pkg_seg_id);
> -
> - seg_hdr = (struct ice_generic_seg_hdr *)
> - ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
> - if (seg_hdr) {
> - struct ice_meta_sect *meta;
> - struct ice_pkg_enum state;
> -
> - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> -
> - /* Get package information from the Metadata Section */
> - meta = (struct ice_meta_sect *)
> - ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
> - ICE_SID_METADATA);
> - if (!meta) {
> - ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
> - return ICE_ERR_CFG;
> - }
> -
> - hw->pkg_ver = meta->ver;
> - ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
> - ICE_NONDMA_TO_NONDMA);
> -
> - ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
> - meta->ver.major, meta->ver.minor, meta->ver.update,
> - meta->ver.draft, meta->name);
> -
> - hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
> - ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
> - sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);
> -
> - ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
> - seg_hdr->seg_format_ver.major,
> - seg_hdr->seg_format_ver.minor,
> - seg_hdr->seg_format_ver.update,
> - seg_hdr->seg_format_ver.draft,
> - seg_hdr->seg_id);
> - } else {
> - ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
> - return ICE_ERR_CFG;
> - }
> -
> - return ICE_SUCCESS;
> -}
> -
> -/**
> - * ice_get_pkg_info
> - * @hw: pointer to the hardware structure
> - *
> - * Store details of the package currently loaded in HW into the HW structure.
> - */
> -static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
> -{
> - struct ice_aqc_get_pkg_info_resp *pkg_info;
> - enum ice_status status;
> - u16 size;
> - u32 i;
> -
> - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
> -
> - size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
> - pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
> - if (!pkg_info)
> - return ICE_ERR_NO_MEMORY;
> -
> - status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
> - if (status)
> - goto init_pkg_free_alloc;
> -
> - for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
> -#define ICE_PKG_FLAG_COUNT 4
> - char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
> - u8 place = 0;
> -
> - if (pkg_info->pkg_info[i].is_active) {
> - flags[place++] = 'A';
> - hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
> - hw->active_track_id =
> - LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
> - ice_memcpy(hw->active_pkg_name,
> - pkg_info->pkg_info[i].name,
> - sizeof(pkg_info->pkg_info[i].name),
> - ICE_NONDMA_TO_NONDMA);
> - hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
> - }
> - if (pkg_info->pkg_info[i].is_active_at_boot)
> - flags[place++] = 'B';
> - if (pkg_info->pkg_info[i].is_modified)
> - flags[place++] = 'M';
> - if (pkg_info->pkg_info[i].is_in_nvm)
> - flags[place++] = 'N';
> -
> - ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
> - i, pkg_info->pkg_info[i].ver.major,
> - pkg_info->pkg_info[i].ver.minor,
> - pkg_info->pkg_info[i].ver.update,
> - pkg_info->pkg_info[i].ver.draft,
> - pkg_info->pkg_info[i].name, flags);
> - }
> -
> -init_pkg_free_alloc:
> - ice_free(hw, pkg_info);
> -
> - return status;
> -}
> -
> -/**
> - * ice_verify_pkg - verify package
> - * @pkg: pointer to the package buffer
> - * @len: size of the package buffer
> - *
> - * Verifies various attributes of the package file, including length, format
> - * version, and the requirement of at least one segment.
> - */
> -static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
> -{
> - u32 seg_count;
> - u32 i;
> -
> - if (len < ice_struct_size(pkg, seg_offset, 1))
> - return ICE_ERR_BUF_TOO_SHORT;
> -
> - if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
> - pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
> - pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
> - pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
> - return ICE_ERR_CFG;
> -
> - /* pkg must have at least one segment */
> - seg_count = LE32_TO_CPU(pkg->seg_count);
> - if (seg_count < 1)
> - return ICE_ERR_CFG;
> -
> - /* make sure segment array fits in package length */
> - if (len < ice_struct_size(pkg, seg_offset, seg_count))
> - return ICE_ERR_BUF_TOO_SHORT;
> -
> - /* all segments must fit within length */
> - for (i = 0; i < seg_count; i++) {
> - u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
> - struct ice_generic_seg_hdr *seg;
> -
> - /* segment header must fit */
> - if (len < off + sizeof(*seg))
> - return ICE_ERR_BUF_TOO_SHORT;
> -
> - seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
> -
> - /* segment body must fit */
> - if (len < off + LE32_TO_CPU(seg->seg_size))
> - return ICE_ERR_BUF_TOO_SHORT;
> - }
> -
> - return ICE_SUCCESS;
> -}
> -
> -/**
> - * ice_free_seg - free package segment pointer
> - * @hw: pointer to the hardware structure
> - *
> - * Frees the package segment pointer in the proper manner, depending on if the
> - * segment was allocated or just the passed in pointer was stored.
> - */
> -void ice_free_seg(struct ice_hw *hw)
> -{
> - if (hw->pkg_copy) {
> - ice_free(hw, hw->pkg_copy);
> - hw->pkg_copy = NULL;
> - hw->pkg_size = 0;
> - }
> - hw->seg = NULL;
> -}
> -
> -/**
> - * ice_init_pkg_regs - initialize additional package registers
> - * @hw: pointer to the hardware structure
> - */
> -static void ice_init_pkg_regs(struct ice_hw *hw)
> -{
> -#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
> -#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
> -#define ICE_SW_BLK_IDX 0
> - if (hw->dcf_enabled)
> - return;
> -
> - /* setup Switch block input mask, which is 48-bits in two parts */
> - wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
> - wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
> -}
> -
> -/**
> - * ice_chk_pkg_version - check package version for compatibility with driver
> - * @pkg_ver: pointer to a version structure to check
> - *
> - * Check to make sure that the package about to be downloaded is compatible with
> - * the driver. To be compatible, the major and minor components of the package
> - * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
> - * definitions.
> - */
> -static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
> -{
> - if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
> - pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
> - return ICE_ERR_NOT_SUPPORTED;
> -
> - return ICE_SUCCESS;
> -}
> -
> -/**
> - * ice_chk_pkg_compat
> - * @hw: pointer to the hardware structure
> - * @ospkg: pointer to the package hdr
> - * @seg: pointer to the package segment hdr
> - *
> - * This function checks the package version compatibility with driver and NVM
> - */
> -static enum ice_status
> -ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
> - struct ice_seg **seg)
> -{
> - struct ice_aqc_get_pkg_info_resp *pkg;
> - enum ice_status status;
> - u16 size;
> - u32 i;
> -
> - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
> -
> - /* Check package version compatibility */
> - status = ice_chk_pkg_version(&hw->pkg_ver);
> - if (status) {
> - ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
> - return status;
> - }
> -
> - /* find ICE segment in given package */
> - *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
> - ospkg);
> - if (!*seg) {
> - ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
> - return ICE_ERR_CFG;
> - }
> -
> - /* Check if FW is compatible with the OS package */
> - size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
> - pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
> - if (!pkg)
> - return ICE_ERR_NO_MEMORY;
> -
> - status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
> - if (status)
> - goto fw_ddp_compat_free_alloc;
> -
> - for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
> - /* loop till we find the NVM package */
> - if (!pkg->pkg_info[i].is_in_nvm)
> - continue;
> - if ((*seg)->hdr.seg_format_ver.major !=
> - pkg->pkg_info[i].ver.major ||
> - (*seg)->hdr.seg_format_ver.minor >
> - pkg->pkg_info[i].ver.minor) {
> - status = ICE_ERR_FW_DDP_MISMATCH;
> - ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
> - }
> - /* done processing NVM package so break */
> - break;
> - }
> -fw_ddp_compat_free_alloc:
> - ice_free(hw, pkg);
> - return status;
> -}
> -
> -/**
> - * ice_sw_fv_handler
> - * @sect_type: section type
> - * @section: pointer to section
> - * @index: index of the field vector entry to be returned
> - * @offset: ptr to variable that receives the offset in the field vector table
> - *
> - * This is a callback function that can be passed to ice_pkg_enum_entry.
> - * This function treats the given section as of type ice_sw_fv_section and
> - * enumerates offset field. "offset" is an index into the field vector table.
> - */
> -static void *
> -ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
> -{
> - struct ice_sw_fv_section *fv_section =
> - (struct ice_sw_fv_section *)section;
> -
> - if (!section || sect_type != ICE_SID_FLD_VEC_SW)
> - return NULL;
> - if (index >= LE16_TO_CPU(fv_section->count))
> - return NULL;
> - if (offset)
> - /* "index" passed in to this function is relative to a given
> - * 4k block. To get to the true index into the field vector
> - * table need to add the relative index to the base_offset
> - * field of this section
> - */
> - *offset = LE16_TO_CPU(fv_section->base_offset) + index;
> - return fv_section->fv + index;
> -}
> -
> -/**
> - * ice_get_prof_index_max - get the max profile index for used profile
> - * @hw: pointer to the HW struct
> - *
> - * Calling this function will get the max profile index for used profile
> - * and store the index number in struct ice_switch_info *switch_info
> - * in hw for following use.
> - */
> -static int ice_get_prof_index_max(struct ice_hw *hw)
> -{
> - u16 prof_index = 0, j, max_prof_index = 0;
> - struct ice_pkg_enum state;
> - struct ice_seg *ice_seg;
> - bool flag = false;
> - struct ice_fv *fv;
> - u32 offset;
> -
> - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> -
> - if (!hw->seg)
> - return ICE_ERR_PARAM;
> -
> - ice_seg = hw->seg;
> -
> - do {
> - fv = (struct ice_fv *)
> - ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
> - &offset, ice_sw_fv_handler);
> - if (!fv)
> - break;
> - ice_seg = NULL;
> -
> - /* in the profile that not be used, the prot_id is set to 0xff
> - * and the off is set to 0x1ff for all the field vectors.
> - */
> - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
> - if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
> - fv->ew[j].off != ICE_FV_OFFSET_INVAL)
> - flag = true;
> - if (flag && prof_index > max_prof_index)
> - max_prof_index = prof_index;
> -
> - prof_index++;
> - flag = false;
> - } while (fv);
> -
> - hw->switch_info->max_used_prof_index = max_prof_index;
> -
> - return ICE_SUCCESS;
> -}
> -
> -/**
> - * ice_init_pkg - initialize/download package
> - * @hw: pointer to the hardware structure
> - * @buf: pointer to the package buffer
> - * @len: size of the package buffer
> - *
> - * This function initializes a package. The package contains HW tables
> - * required to do packet processing. First, the function extracts package
> - * information such as version. Then it finds the ice configuration segment
> - * within the package; this function then saves a copy of the segment pointer
> - * within the supplied package buffer. Next, the function will cache any hints
> - * from the package, followed by downloading the package itself. Note, that if
> - * a previous PF driver has already downloaded the package successfully, then
> - * the current driver will not have to download the package again.
> - *
> - * The local package contents will be used to query default behavior and to
> - * update specific sections of the HW's version of the package (e.g. to update
> - * the parse graph to understand new protocols).
> - *
> - * This function stores a pointer to the package buffer memory, and it is
> - * expected that the supplied buffer will not be freed immediately. If the
> - * package buffer needs to be freed, such as when read from a file, use
> - * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
> - * case.
> - */
> -enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
> -{
> - struct ice_pkg_hdr *pkg;
> - enum ice_status status;
> - struct ice_seg *seg;
> -
> - if (!buf || !len)
> - return ICE_ERR_PARAM;
> -
> - pkg = (struct ice_pkg_hdr *)buf;
> - status = ice_verify_pkg(pkg, len);
> - if (status) {
> - ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
> - status);
> - return status;
> - }
> -
> - /* initialize package info */
> - status = ice_init_pkg_info(hw, pkg);
> - if (status)
> - return status;
> -
> - /* before downloading the package, check package version for
> - * compatibility with driver
> - */
> - status = ice_chk_pkg_compat(hw, pkg, &seg);
> - if (status)
> - return status;
> -
> - /* initialize package hints and then download package */
> - ice_init_pkg_hints(hw, seg);
> - status = ice_download_pkg(hw, seg);
> - if (status == ICE_ERR_AQ_NO_WORK) {
> - ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
> - status = ICE_SUCCESS;
> - }
> -
> - /* Get information on the package currently loaded in HW, then make sure
> - * the driver is compatible with this version.
> - */
> - if (!status) {
> - status = ice_get_pkg_info(hw);
> - if (!status)
> - status = ice_chk_pkg_version(&hw->active_pkg_ver);
> - }
> -
> - if (!status) {
> - hw->seg = seg;
> - /* on successful package download update other required
> - * registers to support the package and fill HW tables
> - * with package content.
> - */
> - ice_init_pkg_regs(hw);
> - ice_fill_blk_tbls(hw);
> - ice_fill_hw_ptype(hw);
> - ice_get_prof_index_max(hw);
> - } else {
> - ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
> - status);
> - }
> -
> - return status;
> -}
> -
> -/**
> - * ice_copy_and_init_pkg - initialize/download a copy of the package
> - * @hw: pointer to the hardware structure
> - * @buf: pointer to the package buffer
> - * @len: size of the package buffer
> - *
> - * This function copies the package buffer, and then calls ice_init_pkg() to
> - * initialize the copied package contents.
> - *
> - * The copying is necessary if the package buffer supplied is constant, or if
> - * the memory may disappear shortly after calling this function.
> - *
> - * If the package buffer resides in the data segment and can be modified, the
> - * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
> - *
> - * However, if the package buffer needs to be copied first, such as when being
> - * read from a file, the caller should use ice_copy_and_init_pkg().
> - *
> - * This function will first copy the package buffer, before calling
> - * ice_init_pkg(). The caller is free to immediately destroy the original
> - * package buffer, as the new copy will be managed by this function and
> - * related routines.
> - */
> -enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
> -{
> - enum ice_status status;
> - u8 *buf_copy;
> -
> - if (!buf || !len)
> - return ICE_ERR_PARAM;
> -
> - buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
> -
> - status = ice_init_pkg(hw, buf_copy, len);
> - if (status) {
> - /* Free the copy, since we failed to initialize the package */
> - ice_free(hw, buf_copy);
> - } else {
> - /* Track the copied pkg so we can free it later */
> - hw->pkg_copy = buf_copy;
> - hw->pkg_size = len;
> - }
> -
> - return status;
> -}
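
To make the copy-vs-no-copy guidance above concrete, a hedged usage sketch
where the DDP blob comes from a file and the temporary buffer is released
right after the call (the file loader and its free routine are hypothetical,
only ice_copy_and_init_pkg() is from the patch):

    u8 *file_buf;    /* filled by some DDP file loader */
    u32 file_len;
    enum ice_status status;

    status = ice_copy_and_init_pkg(hw, file_buf, file_len);
    free(file_buf);  /* safe either way: on success hw->pkg_copy owns the driver's copy */
    if (status)
        return status;
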
> -
> -/**
> - * ice_pkg_buf_alloc
> - * @hw: pointer to the HW structure
> - *
> - * Allocates a package buffer and returns a pointer to the buffer header.
> - * Note: all package contents must be in Little Endian form.
> - */
> -static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
> -{
> - struct ice_buf_build *bld;
> - struct ice_buf_hdr *buf;
> -
> - bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
> - if (!bld)
> - return NULL;
> -
> - buf = (struct ice_buf_hdr *)bld;
> - buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
> - section_entry));
> - return bld;
> -}
> -
> -/**
> - * ice_get_sw_prof_type - determine switch profile type
> - * @hw: pointer to the HW structure
> - * @fv: pointer to the switch field vector
> - */
> -static enum ice_prof_type
> -ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
> -{
> - u16 i;
> - bool valid_prof = false;
> -
> - for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
> - if (fv->ew[i].off != ICE_NAN_OFFSET)
> - valid_prof = true;
> -
> - /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
> - if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
> - fv->ew[i].off == ICE_VNI_OFFSET)
> - return ICE_PROF_TUN_UDP;
> -
> - /* GRE tunnel will have GRE protocol */
> - if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
> - return ICE_PROF_TUN_GRE;
> -
> - /* PPPOE tunnel will have PPPOE protocol */
> - if (fv->ew[i].prot_id == (u8)ICE_PROT_PPPOE)
> - return ICE_PROF_TUN_PPPOE;
> - }
> -
> - return valid_prof ? ICE_PROF_NON_TUN : ICE_PROF_INVALID;
> -}
> -
> -/**
> - * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
> - * @hw: pointer to hardware structure
> - * @req_profs: type of profiles requested
> - * @bm: pointer to memory for returning the bitmap of field vectors
> - */
> -void
> -ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
> - ice_bitmap_t *bm)
> -{
> - struct ice_pkg_enum state;
> - struct ice_seg *ice_seg;
> - struct ice_fv *fv;
> -
> - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> - ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
> - ice_seg = hw->seg;
> - do {
> - enum ice_prof_type prof_type;
> - u32 offset;
> -
> - fv = (struct ice_fv *)
> - ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
> - &offset, ice_sw_fv_handler);
> - ice_seg = NULL;
> -
> - if (fv) {
> - /* Determine field vector type */
> - prof_type = ice_get_sw_prof_type(hw, fv);
> -
> - if (req_profs & prof_type)
> - ice_set_bit((u16)offset, bm);
> - }
> - } while (fv);
> -}
> -
> -/**
> - * ice_get_sw_fv_list
> + * ice_add_tunnel_hint
> * @hw: pointer to the HW structure
> - * @prot_ids: field vector to search for with a given protocol ID
> - * @ids_cnt: lookup/protocol count
> - * @bm: bitmap of field vectors to consider
> - * @fv_list: Head of a list
> - *
> - * Finds all the field vector entries from switch block that contain
> - * a given protocol ID and returns a list of structures of type
> - * "ice_sw_fv_list_entry". Every structure in the list has a field vector
> - * definition and profile ID information
> - * NOTE: The caller of the function is responsible for freeing the memory
> - * allocated for every list entry.
> + * @label_name: label text
> + * @val: value of the tunnel port boost entry
> */
> -enum ice_status
> -ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
> - ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
> +void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
> {
> - struct ice_sw_fv_list_entry *fvl;
> - struct ice_sw_fv_list_entry *tmp;
> - struct ice_pkg_enum state;
> - struct ice_seg *ice_seg;
> - struct ice_fv *fv;
> - u32 offset;
> -
> - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> -
> - if (!ids_cnt || !hw->seg)
> - return ICE_ERR_PARAM;
> -
> - ice_seg = hw->seg;
> - do {
> + if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
> u16 i;
>
> - fv = (struct ice_fv *)
> - ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
> - &offset, ice_sw_fv_handler);
> - if (!fv)
> - break;
> - ice_seg = NULL;
> -
> - /* If field vector is not in the bitmap list, then skip this
> - * profile.
> - */
> - if (!ice_is_bit_set(bm, (u16)offset))
> - continue;
> + for (i = 0; tnls[i].type != TNL_LAST; i++) {
> + size_t len = strlen(tnls[i].label_prefix);
>
> - for (i = 0; i < ids_cnt; i++) {
> - int j;
> + /* Look for matching label start, before continuing */
> + if (strncmp(label_name, tnls[i].label_prefix, len))
> + continue;
>
> - /* This code assumes that if a switch field vector line
> - * has a matching protocol, then this line will contain
> - * the entries necessary to represent every field in
> - * that protocol header.
> + /* Make sure this label matches our PF. Note that the PF
> + * character ('0' - '7') will be located where our
> + * prefix string's null terminator is located.
> */
> - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
> - if (fv->ew[j].prot_id == prot_ids[i])
> - break;
> - if (j >= hw->blk[ICE_BLK_SW].es.fvw)
> - break;
> - if (i + 1 == ids_cnt) {
> - fvl = (struct ice_sw_fv_list_entry *)
> - ice_malloc(hw, sizeof(*fvl));
> - if (!fvl)
> - goto err;
> - fvl->fv_ptr = fv;
> - fvl->profile_id = offset;
> - LIST_ADD(&fvl->list_entry, fv_list);
> + if ((label_name[len] - '0') == hw->pf_id) {
> + hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
> + hw->tnl.tbl[hw->tnl.count].valid = false;
> + hw->tnl.tbl[hw->tnl.count].in_use = false;
> + hw->tnl.tbl[hw->tnl.count].marked = false;
> + hw->tnl.tbl[hw->tnl.count].boost_addr = val;
> + hw->tnl.tbl[hw->tnl.count].port = 0;
> + hw->tnl.count++;
> break;
> }
> }
> - } while (fv);
> - if (LIST_EMPTY(fv_list))
> - return ICE_ERR_CFG;
> - return ICE_SUCCESS;
> -
> -err:
> - LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
> - list_entry) {
> - LIST_DEL(&fvl->list_entry);
> - ice_free(hw, fvl);
> }
> -
> - return ICE_ERR_NO_MEMORY;
> }
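
The PF-suffix check above is easiest to see with a concrete label. A
standalone sketch of the same test, assuming a prefix such as "TNL_VXLAN_PF"
(the real prefixes live in the tnls[] table); label_matches_pf() is purely
illustrative and not in the patch:

    static bool label_matches_pf(const char *label_name, const char *prefix, u8 pf_id)
    {
        size_t len = strlen(prefix);

        /* prefix must match, and the character after it is the PF digit */
        return !strncmp(label_name, prefix, len) &&
               (label_name[len] - '0') == pf_id;
    }

    /* label_matches_pf("TNL_VXLAN_PF2", "TNL_VXLAN_PF", 2) -> true */
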
>
> /**
> - * ice_init_prof_result_bm - Initialize the profile result index bitmap
> - * @hw: pointer to hardware structure
> + * ice_add_dvm_hint
> + * @hw: pointer to the HW structure
> + * @val: value of the boost entry
> + * @enable: true if entry needs to be enabled, or false if needs to be disabled
> */
> -void ice_init_prof_result_bm(struct ice_hw *hw)
> +void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
> {
> - struct ice_pkg_enum state;
> - struct ice_seg *ice_seg;
> - struct ice_fv *fv;
> -
> - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
> -
> - if (!hw->seg)
> - return;
> -
> - ice_seg = hw->seg;
> - do {
> - u32 off;
> - u16 i;
> -
> - fv = (struct ice_fv *)
> - ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
> - &off, ice_sw_fv_handler);
> - ice_seg = NULL;
> - if (!fv)
> - break;
> + if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
> + hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
> + hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
> + hw->dvm_upd.count++;
> + }
> +}
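
Guessing at the caller side for context: the ICE_DVM_PRE/ICE_SVM_PRE prefixes
added to ice_flex_pipe.h below suggest the label parser feeds this helper
roughly like so (a sketch of the expected use, not quoted from the patch):

    if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
        ice_add_dvm_hint(hw, val, true);   /* enable these entries */
    else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
        ice_add_dvm_hint(hw, val, false);  /* disable these entries */
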
>
> - ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
> - ICE_MAX_FV_WORDS);
> +/* Key creation */
>
> - /* Determine empty field vector indices, these can be
> - * used for recipe results. Skip index 0, since it is
> - * always used for Switch ID.
> - */
> - for (i = 1; i < ICE_MAX_FV_WORDS; i++)
> - if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
> - fv->ew[i].off == ICE_FV_OFFSET_INVAL)
> - ice_set_bit(i,
> - hw->switch_info->prof_res_bm[off]);
> - } while (fv);
> -}
> +#define ICE_DC_KEY 0x1 /* don't care */
> +#define ICE_DC_KEYINV 0x1
> +#define ICE_NM_KEY 0x0 /* never match */
> +#define ICE_NM_KEYINV 0x0
> +#define ICE_0_KEY 0x1 /* match 0 */
> +#define ICE_0_KEYINV 0x0
> +#define ICE_1_KEY 0x0 /* match 1 */
> +#define ICE_1_KEYINV 0x1
>
> /**
> - * ice_pkg_buf_free
> - * @hw: pointer to the HW structure
> - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
> + * ice_gen_key_word - generate 16-bits of a key/mask word
> + * @val: the value
> + * @valid: valid bits mask (change only the valid bits)
> + * @dont_care: don't care mask
> + * @nvr_mtch: never match mask
> + * @key: pointer to an array of where the resulting key portion
> + * @key_inv: pointer to an array of where the resulting key invert portion
> *
> - * Frees a package buffer
> - */
> -void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
> -{
> - ice_free(hw, bld);
> -}
> -
> -/**
> - * ice_pkg_buf_reserve_section
> - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
> - * @count: the number of sections to reserve
> + * This function generates 16-bits from an 8-bit value, an 8-bit don't care mask
> + * and an 8-bit never match mask. The 16-bits of output are divided into 8 bits
> + * of key and 8 bits of key invert.
> + *
> + * '0' = b01, always match a 0 bit
> + * '1' = b10, always match a 1 bit
> + * '?' = b11, don't care bit (always matches)
> + * '~' = b00, never match bit
> *
> - * Reserves one or more section table entries in a package buffer. This routine
> - * can be called multiple times as long as they are made before calling
> - * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
> - * is called once, the number of sections that can be allocated will not be able
> - * to be increased; not using all reserved sections is fine, but this will
> - * result in some wasted space in the buffer.
> - * Note: all package contents must be in Little Endian form.
> + * Input:
> + * val: b0 1 0 1 0 1
> + * dont_care: b0 0 1 1 0 0
> + * never_mtch: b0 0 0 0 1 1
> + * ------------------------------
> + * Result: key: b01 10 11 11 00 00
> */
> static enum ice_status
> -ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
> +ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
> + u8 *key_inv)
> {
> - struct ice_buf_hdr *buf;
> - u16 section_count;
> - u16 data_end;
> + u8 in_key = *key, in_key_inv = *key_inv;
> + u8 i;
>
> - if (!bld)
> - return ICE_ERR_PARAM;
> + /* 'dont_care' and 'nvr_mtch' masks cannot overlap */
> + if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
> + return ICE_ERR_CFG;
>
> - buf = (struct ice_buf_hdr *)&bld->buf;
> + *key = 0;
> + *key_inv = 0;
>
> - /* already an active section, can't increase table size */
> - section_count = LE16_TO_CPU(buf->section_count);
> - if (section_count > 0)
> - return ICE_ERR_CFG;
> + /* encode the 8 bits into 8-bit key and 8-bit key invert */
> + for (i = 0; i < 8; i++) {
> + *key >>= 1;
> + *key_inv >>= 1;
>
> - if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
> - return ICE_ERR_CFG;
> - bld->reserved_section_table_entries += count;
> + if (!(valid & 0x1)) { /* change only valid bits */
> + *key |= (in_key & 0x1) << 7;
> + *key_inv |= (in_key_inv & 0x1) << 7;
> + } else if (dont_care & 0x1) { /* don't care bit */
> + *key |= ICE_DC_KEY << 7;
> + *key_inv |= ICE_DC_KEYINV << 7;
> + } else if (nvr_mtch & 0x1) { /* never match bit */
> + *key |= ICE_NM_KEY << 7;
> + *key_inv |= ICE_NM_KEYINV << 7;
> + } else if (val & 0x01) { /* exact 1 match */
> + *key |= ICE_1_KEY << 7;
> + *key_inv |= ICE_1_KEYINV << 7;
> + } else { /* exact 0 match */
> + *key |= ICE_0_KEY << 7;
> + *key_inv |= ICE_0_KEYINV << 7;
> + }
>
> - data_end = LE16_TO_CPU(buf->data_end) +
> - FLEX_ARRAY_SIZE(buf, section_entry, count);
> - buf->data_end = CPU_TO_LE16(data_end);
> + dont_care >>= 1;
> + nvr_mtch >>= 1;
> + valid >>= 1;
> + val >>= 1;
> + in_key >>= 1;
> + in_key_inv >>= 1;
> + }
>
> return ICE_SUCCESS;
> }
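
Working the documented example through the loop (with valid = 0xff, so every
bit is re-encoded); the values below are derived from the truth table in the
comment and are shown only as an illustration:

    u8 key = 0, key_inv = 0;

    /* val = b00010101, dont_care = b00001100, nvr_mtch = b00000011 */
    ice_gen_key_word(0x15, 0xff, 0x0c, 0x03, &key, &key_inv);
    /* per-bit (key_inv,key) pairs, bit7..bit0: 01 01 01 10 11 11 00 00,
     * i.e. key = 0xEC, key_inv = 0x1C. Note that ice_set_key() would reject
     * a never-match mask with two bits set; this only exercises the encoder.
     */
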
>
> /**
> - * ice_pkg_buf_alloc_section
> - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
> - * @type: the section type value
> - * @size: the size of the section to reserve (in bytes)
> + * ice_bits_max_set - determine if the number of bits set is within a maximum
> + * @mask: pointer to the byte array which is the mask
> + * @size: the number of bytes in the mask
> + * @max: the max number of set bits
> *
> - * Reserves memory in the buffer for a section's content and updates the
> - * buffers' status accordingly. This routine returns a pointer to the first
> - * byte of the section start within the buffer, which is used to fill in the
> - * section contents.
> - * Note: all package contents must be in Little Endian form.
> + * This function determines if there are at most 'max' number of bits set in an
> + * array. Returns true if the number of bits set is <= max or will return false
> + * otherwise.
> */
> -static void *
> -ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
> +static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
> {
> - struct ice_buf_hdr *buf;
> - u16 sect_count;
> - u16 data_end;
> -
> - if (!bld || !type || !size)
> - return NULL;
> -
> - buf = (struct ice_buf_hdr *)&bld->buf;
> -
> - /* check for enough space left in buffer */
> - data_end = LE16_TO_CPU(buf->data_end);
> -
> - /* section start must align on 4 byte boundary */
> - data_end = ICE_ALIGN(data_end, 4);
> -
> - if ((data_end + size) > ICE_MAX_S_DATA_END)
> - return NULL;
> -
> - /* check for more available section table entries */
> - sect_count = LE16_TO_CPU(buf->section_count);
> - if (sect_count < bld->reserved_section_table_entries) {
> - void *section_ptr = ((u8 *)buf) + data_end;
> + u16 count = 0;
> + u16 i;
>
> - buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
> - buf->section_entry[sect_count].size = CPU_TO_LE16(size);
> - buf->section_entry[sect_count].type = CPU_TO_LE32(type);
> + /* check each byte */
> + for (i = 0; i < size; i++) {
> + /* if 0, go to next byte */
> + if (!mask[i])
> + continue;
>
> - data_end += size;
> - buf->data_end = CPU_TO_LE16(data_end);
> + /* We know there is at least one set bit in this byte because of
> + * the above check; if we already have found 'max' number of
> + * bits set, then we can return failure now.
> + */
> + if (count == max)
> + return false;
>
> - buf->section_count = CPU_TO_LE16(sect_count + 1);
> - return section_ptr;
> + /* count the bits in this byte, checking threshold */
> + count += ice_hweight8(mask[i]);
> + if (count > max)
> + return false;
> }
>
> - /* no free section table entries */
> - return NULL;
> + return true;
> }
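
A quick illustration of the threshold check, as used by the
ICE_NVR_MTCH_BITS_MAX == 1 rule in ice_set_key() (sketch only):

    u8 one_bit[2] = { 0x00, 0x10 };
    u8 two_bits[2] = { 0x01, 0x10 };

    ice_bits_max_set(one_bit, 2, 1);   /* true: a single bit is set */
    ice_bits_max_set(two_bits, 2, 1);  /* false: a second set bit is found */
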
>
> /**
> - * ice_pkg_buf_alloc_single_section
> - * @hw: pointer to the HW structure
> - * @type: the section type value
> - * @size: the size of the section to reserve (in bytes)
> - * @section: returns pointer to the section
> + * ice_set_key - generate a variable sized key with multiples of 16-bits
> + * @key: pointer to where the key will be stored
> + * @size: the size of the complete key in bytes (must be even)
> + * @val: array of 8-bit values that makes up the value portion of the key
> + * @upd: array of 8-bit masks that determine what key portion to update
> + * @dc: array of 8-bit masks that make up the don't care mask
> + * @nm: array of 8-bit masks that make up the never match mask
> + * @off: the offset of the first byte in the key to update
> + * @len: the number of bytes in the key update
> *
> - * Allocates a package buffer with a single section.
> - * Note: all package contents must be in Little Endian form.
> + * This function generates a key from a value, a don't care mask and a never
> + * match mask.
> + * upd, dc, and nm are optional parameters, and can be NULL:
> + * upd == NULL --> upd mask is all 1's (update all bits)
> + * dc == NULL --> dc mask is all 0's (no don't care bits)
> + * nm == NULL --> nm mask is all 0's (no never match bits)
> */
> -struct ice_buf_build *
> -ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
> - void **section)
> +enum ice_status
> +ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
> + u16 len)
> {
> - struct ice_buf_build *buf;
> -
> - if (!section)
> - return NULL;
> -
> - buf = ice_pkg_buf_alloc(hw);
> - if (!buf)
> - return NULL;
> -
> - if (ice_pkg_buf_reserve_section(buf, 1))
> - goto ice_pkg_buf_alloc_single_section_err;
> -
> - *section = ice_pkg_buf_alloc_section(buf, type, size);
> - if (!*section)
> - goto ice_pkg_buf_alloc_single_section_err;
> -
> - return buf;
> -
> -ice_pkg_buf_alloc_single_section_err:
> - ice_pkg_buf_free(hw, buf);
> - return NULL;
> -}
> + u16 half_size;
> + u16 i;
>
> -/**
> - * ice_pkg_buf_get_active_sections
> - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
> - *
> - * Returns the number of active sections. Before using the package buffer
> - * in an update package command, the caller should make sure that there is at
> - * least one active section - otherwise, the buffer is not legal and should
> - * not be used.
> - * Note: all package contents must be in Little Endian form.
> - */
> -static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
> -{
> - struct ice_buf_hdr *buf;
> + /* size must be a multiple of 2 bytes. */
> + if (size % 2)
> + return ICE_ERR_CFG;
> + half_size = size / 2;
>
> - if (!bld)
> - return 0;
> + if (off + len > half_size)
> + return ICE_ERR_CFG;
>
> - buf = (struct ice_buf_hdr *)&bld->buf;
> - return LE16_TO_CPU(buf->section_count);
> -}
> + /* Make sure at most one bit is set in the never match mask. Having more
> + * than one never match mask bit set will cause HW to consume excessive
> + * power otherwise; this is a power management efficiency check.
> + */
> +#define ICE_NVR_MTCH_BITS_MAX 1
> + if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
> + return ICE_ERR_CFG;
>
> -/**
> - * ice_pkg_buf
> - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
> - *
> - * Return a pointer to the buffer's header
> - */
> -struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
> -{
> - if (!bld)
> - return NULL;
> + for (i = 0; i < len; i++)
> + if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
> + dc ? dc[i] : 0, nm ? nm[i] : 0,
> + key + off + i, key + half_size + off + i))
> + return ICE_ERR_CFG;
>
> - return &bld->buf;
> + return ICE_SUCCESS;
> }
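
And a hedged usage sketch for the key builder itself: an exact match on a
16-bit value, with upd/dc/nm left NULL as the comment above allows (the byte
order of val is an assumption here, not something the patch states):

    u8 key[4] = { 0 };          /* 2 key bytes + 2 key-invert bytes */
    u8 val[2] = { 0x34, 0x12 }; /* 0x1234, low byte first (assumed) */

    if (ice_set_key(key, sizeof(key), val, NULL, NULL, NULL, 0, sizeof(val)))
        return ICE_ERR_CFG;
    /* with no don't-care/never-match bits, key[0..1] ends up as ~val and
     * key[2..3] as val, per the ICE_0_KEY/ICE_1_KEY encodings above
     */
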
>
> /**
> @@ -3956,6 +2132,18 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
> }
> }
>
> +/**
> + * ice_init_flow_profs - init flow profile locks and list heads
> + * @hw: pointer to the hardware structure
> + * @blk_idx: HW block index
> + */
> +static
> +void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
> +{
> + ice_init_lock(&hw->fl_profs_locks[blk_idx]);
> + INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
> +}
> +
> /**
> * ice_fill_blk_tbls - Read package context for tables
> * @hw: pointer to the hardware structure
> @@ -4098,17 +2286,6 @@ void ice_free_hw_tbls(struct ice_hw *hw)
> ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
> }
>
> -/**
> - * ice_init_flow_profs - init flow profile locks and list heads
> - * @hw: pointer to the hardware structure
> - * @blk_idx: HW block index
> - */
> -static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
> -{
> - ice_init_lock(&hw->fl_profs_locks[blk_idx]);
> - INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
> -}
> -
> /**
> * ice_clear_hw_tbls - clear HW tables and flow profiles
> * @hw: pointer to the hardware structure
> diff --git a/drivers/net/ice/base/ice_flex_pipe.h b/drivers/net/ice/base/ice_flex_pipe.h
> index ab897de4f3..aab765e68f 100644
> --- a/drivers/net/ice/base/ice_flex_pipe.h
> +++ b/drivers/net/ice/base/ice_flex_pipe.h
> @@ -7,23 +7,6 @@
>
> #include "ice_type.h"
>
> -/* Package minimal version supported */
> -#define ICE_PKG_SUPP_VER_MAJ 1
> -#define ICE_PKG_SUPP_VER_MNR 3
> -
> -/* Package format version */
> -#define ICE_PKG_FMT_VER_MAJ 1
> -#define ICE_PKG_FMT_VER_MNR 0
> -#define ICE_PKG_FMT_VER_UPD 0
> -#define ICE_PKG_FMT_VER_DFT 0
> -
> -#define ICE_PKG_CNT 4
> -
> -enum ice_status
> -ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
> -enum ice_status
> -ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
> -void ice_release_change_lock(struct ice_hw *hw);
> enum ice_status
> ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u8 fv_idx,
> u8 *prot, u16 *off);
> @@ -36,12 +19,6 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
> void
> ice_init_prof_result_bm(struct ice_hw *hw);
> enum ice_status
> -ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
> - ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
> -enum ice_status
> -ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
> -u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
> -enum ice_status
> ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
> u16 buf_size, struct ice_sq_cd *cd);
> bool
> @@ -79,31 +56,31 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
> enum ice_status
> ice_flow_assoc_hw_prof(struct ice_hw *hw, enum ice_block blk,
> u16 dest_vsi_handle, u16 fdir_vsi_handle, int id);
> -enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
> -enum ice_status
> -ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
> enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
> -void ice_free_seg(struct ice_hw *hw);
> void ice_fill_blk_tbls(struct ice_hw *hw);
> void ice_clear_hw_tbls(struct ice_hw *hw);
> void ice_free_hw_tbls(struct ice_hw *hw);
> enum ice_status
> ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
> -struct ice_buf_build *
> -ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
> - void **section);
> -struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
> -void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
>
> enum ice_status
> ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
> u16 len);
> -void *
> -ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
> - u32 sect_type, u32 *offset,
> - void *(*handler)(u32 sect_type, void *section,
> - u32 index, u32 *offset));
> -void *
> -ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
> - u32 sect_type);
> +
> +void ice_fill_blk_tbls(struct ice_hw *hw);
> +
> +/* To support tunneling entries by PF, the package will append the PF number to
> + * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
> + */
> +#define ICE_TNL_PRE "TNL_"
> +/* For supporting double VLAN mode, it is necessary to enable or disable certain
> + * boost tcam entries. The metadata label names that match the following
> + * prefixes will be saved to allow enabling double VLAN mode.
> + */
> +#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
> +#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
> +
> +void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val);
> +void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable);
> +
> #endif /* _ICE_FLEX_PIPE_H_ */
> diff --git a/drivers/net/ice/base/ice_flex_type.h b/drivers/net/ice/base/ice_flex_type.h
> index 09a02fe9ac..d45653b637 100644
> --- a/drivers/net/ice/base/ice_flex_type.h
> +++ b/drivers/net/ice/base/ice_flex_type.h
> @@ -14,6 +14,7 @@ struct ice_fv_word {
> u16 off; /* Offset within the protocol header */
> u8 resvrd;
> };
> +
> #pragma pack()
>
> #define ICE_MAX_NUM_PROFILES 256
> @@ -23,251 +24,6 @@ struct ice_fv {
> struct ice_fv_word ew[ICE_MAX_FV_WORDS];
> };
>
> -/* Package and segment headers and tables */
> -struct ice_pkg_hdr {
> - struct ice_pkg_ver pkg_format_ver;
> - __le32 seg_count;
> - __le32 seg_offset[STRUCT_HACK_VAR_LEN];
> -};
> -
> -/* generic segment */
> -struct ice_generic_seg_hdr {
> -#define SEGMENT_TYPE_METADATA 0x00000001
> -#define SEGMENT_TYPE_ICE_E810 0x00000010
> - __le32 seg_type;
> - struct ice_pkg_ver seg_format_ver;
> - __le32 seg_size;
> - char seg_id[ICE_PKG_NAME_SIZE];
> -};
> -
> -/* ice specific segment */
> -
> -union ice_device_id {
> - struct {
> - __le16 device_id;
> - __le16 vendor_id;
> - } dev_vend_id;
> - __le32 id;
> -};
> -
> -struct ice_device_id_entry {
> - union ice_device_id device;
> - union ice_device_id sub_device;
> -};
> -
> -struct ice_seg {
> - struct ice_generic_seg_hdr hdr;
> - __le32 device_table_count;
> - struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN];
> -};
> -
> -struct ice_nvm_table {
> - __le32 table_count;
> - __le32 vers[STRUCT_HACK_VAR_LEN];
> -};
> -
> -struct ice_buf {
> -#define ICE_PKG_BUF_SIZE 4096
> - u8 buf[ICE_PKG_BUF_SIZE];
> -};
> -
> -struct ice_buf_table {
> - __le32 buf_count;
> - struct ice_buf buf_array[STRUCT_HACK_VAR_LEN];
> -};
> -
> -/* global metadata specific segment */
> -struct ice_global_metadata_seg {
> - struct ice_generic_seg_hdr hdr;
> - struct ice_pkg_ver pkg_ver;
> - __le32 rsvd;
> - char pkg_name[ICE_PKG_NAME_SIZE];
> -};
> -
> -#define ICE_MIN_S_OFF 12
> -#define ICE_MAX_S_OFF 4095
> -#define ICE_MIN_S_SZ 1
> -#define ICE_MAX_S_SZ 4084
> -
> -/* section information */
> -struct ice_section_entry {
> - __le32 type;
> - __le16 offset;
> - __le16 size;
> -};
> -
> -#define ICE_MIN_S_COUNT 1
> -#define ICE_MAX_S_COUNT 511
> -#define ICE_MIN_S_DATA_END 12
> -#define ICE_MAX_S_DATA_END 4096
> -
> -#define ICE_METADATA_BUF 0x80000000
> -
> -struct ice_buf_hdr {
> - __le16 section_count;
> - __le16 data_end;
> - struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN];
> -};
> -
> -#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
> - ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
> - (ent_sz))
> -
> -/* ice package section IDs */
> -#define ICE_SID_METADATA 1
> -#define ICE_SID_XLT0_SW 10
> -#define ICE_SID_XLT_KEY_BUILDER_SW 11
> -#define ICE_SID_XLT1_SW 12
> -#define ICE_SID_XLT2_SW 13
> -#define ICE_SID_PROFID_TCAM_SW 14
> -#define ICE_SID_PROFID_REDIR_SW 15
> -#define ICE_SID_FLD_VEC_SW 16
> -#define ICE_SID_CDID_KEY_BUILDER_SW 17
> -#define ICE_SID_CDID_REDIR_SW 18
> -
> -#define ICE_SID_XLT0_ACL 20
> -#define ICE_SID_XLT_KEY_BUILDER_ACL 21
> -#define ICE_SID_XLT1_ACL 22
> -#define ICE_SID_XLT2_ACL 23
> -#define ICE_SID_PROFID_TCAM_ACL 24
> -#define ICE_SID_PROFID_REDIR_ACL 25
> -#define ICE_SID_FLD_VEC_ACL 26
> -#define ICE_SID_CDID_KEY_BUILDER_ACL 27
> -#define ICE_SID_CDID_REDIR_ACL 28
> -
> -#define ICE_SID_XLT0_FD 30
> -#define ICE_SID_XLT_KEY_BUILDER_FD 31
> -#define ICE_SID_XLT1_FD 32
> -#define ICE_SID_XLT2_FD 33
> -#define ICE_SID_PROFID_TCAM_FD 34
> -#define ICE_SID_PROFID_REDIR_FD 35
> -#define ICE_SID_FLD_VEC_FD 36
> -#define ICE_SID_CDID_KEY_BUILDER_FD 37
> -#define ICE_SID_CDID_REDIR_FD 38
> -
> -#define ICE_SID_XLT0_RSS 40
> -#define ICE_SID_XLT_KEY_BUILDER_RSS 41
> -#define ICE_SID_XLT1_RSS 42
> -#define ICE_SID_XLT2_RSS 43
> -#define ICE_SID_PROFID_TCAM_RSS 44
> -#define ICE_SID_PROFID_REDIR_RSS 45
> -#define ICE_SID_FLD_VEC_RSS 46
> -#define ICE_SID_CDID_KEY_BUILDER_RSS 47
> -#define ICE_SID_CDID_REDIR_RSS 48
> -
> -#define ICE_SID_RXPARSER_CAM 50
> -#define ICE_SID_RXPARSER_NOMATCH_CAM 51
> -#define ICE_SID_RXPARSER_IMEM 52
> -#define ICE_SID_RXPARSER_XLT0_BUILDER 53
> -#define ICE_SID_RXPARSER_NODE_PTYPE 54
> -#define ICE_SID_RXPARSER_MARKER_PTYPE 55
> -#define ICE_SID_RXPARSER_BOOST_TCAM 56
> -#define ICE_SID_RXPARSER_PROTO_GRP 57
> -#define ICE_SID_RXPARSER_METADATA_INIT 58
> -#define ICE_SID_RXPARSER_XLT0 59
> -
> -#define ICE_SID_TXPARSER_CAM 60
> -#define ICE_SID_TXPARSER_NOMATCH_CAM 61
> -#define ICE_SID_TXPARSER_IMEM 62
> -#define ICE_SID_TXPARSER_XLT0_BUILDER 63
> -#define ICE_SID_TXPARSER_NODE_PTYPE 64
> -#define ICE_SID_TXPARSER_MARKER_PTYPE 65
> -#define ICE_SID_TXPARSER_BOOST_TCAM 66
> -#define ICE_SID_TXPARSER_PROTO_GRP 67
> -#define ICE_SID_TXPARSER_METADATA_INIT 68
> -#define ICE_SID_TXPARSER_XLT0 69
> -
> -#define ICE_SID_RXPARSER_INIT_REDIR 70
> -#define ICE_SID_TXPARSER_INIT_REDIR 71
> -#define ICE_SID_RXPARSER_MARKER_GRP 72
> -#define ICE_SID_TXPARSER_MARKER_GRP 73
> -#define ICE_SID_RXPARSER_LAST_PROTO 74
> -#define ICE_SID_TXPARSER_LAST_PROTO 75
> -#define ICE_SID_RXPARSER_PG_SPILL 76
> -#define ICE_SID_TXPARSER_PG_SPILL 77
> -#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
> -#define ICE_SID_TXPARSER_NOMATCH_SPILL 79
> -
> -#define ICE_SID_XLT0_PE 80
> -#define ICE_SID_XLT_KEY_BUILDER_PE 81
> -#define ICE_SID_XLT1_PE 82
> -#define ICE_SID_XLT2_PE 83
> -#define ICE_SID_PROFID_TCAM_PE 84
> -#define ICE_SID_PROFID_REDIR_PE 85
> -#define ICE_SID_FLD_VEC_PE 86
> -#define ICE_SID_CDID_KEY_BUILDER_PE 87
> -#define ICE_SID_CDID_REDIR_PE 88
> -
> -#define ICE_SID_RXPARSER_FLAG_REDIR 97
> -
> -/* Label Metadata section IDs */
> -#define ICE_SID_LBL_FIRST 0x80000010
> -#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010
> -#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011
> -#define ICE_SID_LBL_RESERVED_12 0x80000012
> -#define ICE_SID_LBL_RESERVED_13 0x80000013
> -#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014
> -#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015
> -#define ICE_SID_LBL_PTYPE 0x80000016
> -#define ICE_SID_LBL_PROTOCOL_ID 0x80000017
> -#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
> -#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019
> -#define ICE_SID_LBL_RXPARSER_PG 0x8000001A
> -#define ICE_SID_LBL_TXPARSER_PG 0x8000001B
> -#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C
> -#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D
> -#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E
> -#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F
> -#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020
> -#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021
> -#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022
> -#define ICE_SID_LBL_FLAG 0x80000023
> -#define ICE_SID_LBL_REG 0x80000024
> -#define ICE_SID_LBL_SW_PTG 0x80000025
> -#define ICE_SID_LBL_ACL_PTG 0x80000026
> -#define ICE_SID_LBL_PE_PTG 0x80000027
> -#define ICE_SID_LBL_RSS_PTG 0x80000028
> -#define ICE_SID_LBL_FD_PTG 0x80000029
> -#define ICE_SID_LBL_SW_VSIG 0x8000002A
> -#define ICE_SID_LBL_ACL_VSIG 0x8000002B
> -#define ICE_SID_LBL_PE_VSIG 0x8000002C
> -#define ICE_SID_LBL_RSS_VSIG 0x8000002D
> -#define ICE_SID_LBL_FD_VSIG 0x8000002E
> -#define ICE_SID_LBL_PTYPE_META 0x8000002F
> -#define ICE_SID_LBL_SW_PROFID 0x80000030
> -#define ICE_SID_LBL_ACL_PROFID 0x80000031
> -#define ICE_SID_LBL_PE_PROFID 0x80000032
> -#define ICE_SID_LBL_RSS_PROFID 0x80000033
> -#define ICE_SID_LBL_FD_PROFID 0x80000034
> -#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035
> -#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036
> -#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037
> -#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038
> -/* The following define MUST be updated to reflect the last label section ID */
> -#define ICE_SID_LBL_LAST 0x80000038
> -
> -enum ice_block {
> - ICE_BLK_SW = 0,
> - ICE_BLK_ACL,
> - ICE_BLK_FD,
> - ICE_BLK_RSS,
> - ICE_BLK_PE,
> - ICE_BLK_COUNT
> -};
> -
> -enum ice_sect {
> - ICE_XLT0 = 0,
> - ICE_XLT_KB,
> - ICE_XLT1,
> - ICE_XLT2,
> - ICE_PROF_TCAM,
> - ICE_PROF_REDIR,
> - ICE_VEC_TBL,
> - ICE_CDID_KB,
> - ICE_CDID_REDIR,
> - ICE_SECT_COUNT
> -};
> -
> /* Packet Type (PTYPE) values */
> #define ICE_PTYPE_MAC_PAY 1
> #define ICE_MAC_PTP 2
> @@ -662,25 +418,6 @@ struct ice_boost_tcam_section {
> sizeof(struct ice_boost_tcam_entry), \
> sizeof(struct ice_boost_tcam_entry))
>
> -/* package Marker PType TCAM entry */
> -struct ice_marker_ptype_tcam_entry {
> -#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
> - __le16 addr;
> - __le16 ptype;
> - u8 keys[20];
> -};
> -
> -struct ice_marker_ptype_tcam_section {
> - __le16 count;
> - __le16 reserved;
> - struct ice_marker_ptype_tcam_entry tcam[STRUCT_HACK_VAR_LEN];
> -};
> -
> -#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
> - ice_struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \
> - sizeof(struct ice_marker_ptype_tcam_entry), \
> - sizeof(struct ice_marker_ptype_tcam_entry))
> -
> struct ice_xlt1_section {
> __le16 count;
> __le16 offset;
> @@ -699,27 +436,6 @@ struct ice_prof_redir_section {
> u8 redir_value[STRUCT_HACK_VAR_LEN];
> };
>
> -/* package buffer building */
> -
> -struct ice_buf_build {
> - struct ice_buf buf;
> - u16 reserved_section_table_entries;
> -};
> -
> -struct ice_pkg_enum {
> - struct ice_buf_table *buf_table;
> - u32 buf_idx;
> -
> - u32 type;
> - struct ice_buf_hdr *buf;
> - u32 sect_idx;
> - void *sect;
> - u32 sect_type;
> -
> - u32 entry_idx;
> - void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
> -};
> -
> /* Tunnel enabling */
>
> enum ice_tunnel_type {
> diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
> index 513623a0a4..ad61dde397 100644
> --- a/drivers/net/ice/base/ice_switch.c
> +++ b/drivers/net/ice/base/ice_switch.c
> @@ -7417,37 +7417,18 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
> * @hw: pointer to hardware structure
> * @lkups: lookup elements or match criteria for the advanced recipe, one
> * structure per protocol header
> - * @lkups_cnt: number of protocols
> * @bm: bitmap of field vectors to consider
> * @fv_list: pointer to a list that holds the returned field vectors
> */
> static enum ice_status
> -ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
> +ice_get_fv(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
> ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
> {
> - enum ice_status status;
> - u8 *prot_ids;
> - u16 i;
> -
> - if (!lkups_cnt)
> + if (!lkups->n_val_words)
> return ICE_SUCCESS;
>
> - prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
> - if (!prot_ids)
> - return ICE_ERR_NO_MEMORY;
> -
> - for (i = 0; i < lkups_cnt; i++)
> - if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
> - status = ICE_ERR_CFG;
> - goto free_mem;
> - }
> -
> /* Find field vectors that include all specified protocol types */
> - status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
> -
> -free_mem:
> - ice_free(hw, prot_ids);
> - return status;
> + return ice_get_sw_fv_list(hw, lkups, bm, fv_list);
> }
>
> /**
> @@ -7840,16 +7821,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
> */
> ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
>
> - /* If it is a packet to match any, add a lookup element to match direction
> - * flag of source interface.
> - */
> - if (rinfo->tun_type == ICE_SW_TUN_AND_NON_TUN &&
> - lkups_cnt < ICE_MAX_CHAIN_WORDS) {
> - lkups[lkups_cnt].type = ICE_FLG_DIR;
> - lkups_cnt++;
> - }
> -
> - status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
> + status = ice_get_fv(hw, lkup_exts, fv_bitmap, &rm->fv_list);
> if (status)
> goto err_unroll;
>
> diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
> index a17accff19..d94fdcda67 100644
> --- a/drivers/net/ice/base/ice_type.h
> +++ b/drivers/net/ice/base/ice_type.h
> @@ -5,54 +5,15 @@
> #ifndef _ICE_TYPE_H_
> #define _ICE_TYPE_H_
>
> -#define ETH_ALEN 6
> -
> -#define ETH_HEADER_LEN 14
> -
> -#define BIT(a) (1UL << (a))
> -#define BIT_ULL(a) (1ULL << (a))
> -
> -#define BITS_PER_BYTE 8
> -
> -#define _FORCE_
> -
> -#define ICE_BYTES_PER_WORD 2
> -#define ICE_BYTES_PER_DWORD 4
> -#define ICE_MAX_TRAFFIC_CLASS 8
> -
> -/**
> - * ROUND_UP - round up to next arbitrary multiple (not a power of 2)
> - * @a: value to round up
> - * @b: arbitrary multiple
> - *
> - * Round up to the next multiple of the arbitrary b.
> - * Note, when b is a power of 2 use ICE_ALIGN() instead.
> - */
> -#define ROUND_UP(a, b) ((b) * DIVIDE_AND_ROUND_UP((a), (b)))
> -
> -#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b))
> -
> -#define IS_ASCII(_ch) ((_ch) < 0x80)
> -
> -#define STRUCT_HACK_VAR_LEN
> -/**
> - * ice_struct_size - size of struct with C99 flexible array member
> - * @ptr: pointer to structure
> - * @field: flexible array member (last member of the structure)
> - * @num: number of elements of that flexible array member
> - */
> -#define ice_struct_size(ptr, field, num) \
> - (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
> -
> -#define FLEX_ARRAY_SIZE(_ptr, _mem, cnt) ((cnt) * sizeof(_ptr->_mem[0]))
> -
> +#include "ice_defs.h"
> #include "ice_status.h"
> #include "ice_hw_autogen.h"
> #include "ice_devids.h"
> #include "ice_osdep.h"
> #include "ice_bitops.h" /* Must come before ice_controlq.h */
> -#include "ice_controlq.h"
> #include "ice_lan_tx_rx.h"
> +#include "ice_ddp.h"
> +#include "ice_controlq.h"
> #include "ice_flex_type.h"
> #include "ice_protocol_type.h"
> #include "ice_sbq_cmd.h"
> @@ -191,11 +152,6 @@ enum ice_aq_res_ids {
> #define ICE_CHANGE_LOCK_TIMEOUT 1000
> #define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
>
> -enum ice_aq_res_access_type {
> - ICE_RES_READ = 1,
> - ICE_RES_WRITE
> -};
> -
> struct ice_driver_ver {
> u8 major_ver;
> u8 minor_ver;
> @@ -248,6 +204,7 @@ enum ice_mac_type {
> ICE_MAC_UNKNOWN = 0,
> ICE_MAC_E810,
> ICE_MAC_GENERIC,
> + ICE_MAC_GENERIC_3K,
> };
>
> /* Media Types */
> @@ -636,6 +593,7 @@ struct ice_hw_common_caps {
> #define ICE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
> bool ext_topo_dev_img_prog_en[ICE_EXT_TOPO_DEV_IMG_COUNT];
> #define ICE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
> + bool tx_sched_topo_comp_mode_en;
> };
>
> /* IEEE 1588 TIME_SYNC specific info */
> @@ -1247,7 +1205,9 @@ struct ice_hw {
> /* Active package version (currently active) */
> struct ice_pkg_ver active_pkg_ver;
> u32 pkg_seg_id;
> + u32 pkg_sign_type;
> u32 active_track_id;
> + u8 pkg_has_signing_seg:1;
> u8 active_pkg_name[ICE_PKG_NAME_SIZE];
> u8 active_pkg_in_nvm;
>
> diff --git a/drivers/net/ice/base/ice_vlan_mode.c b/drivers/net/ice/base/ice_vlan_mode.c
> index 29c6509fc5..d1003a5a89 100644
> --- a/drivers/net/ice/base/ice_vlan_mode.c
> +++ b/drivers/net/ice/base/ice_vlan_mode.c
> @@ -4,6 +4,7 @@
>
> #include "ice_common.h"
>
> +#include "ice_ddp.h"
> /**
> * ice_pkg_get_supported_vlan_mode - chk if DDP supports Double VLAN mode (DVM)
> * @hw: pointer to the HW struct
> diff --git a/drivers/net/ice/base/meson.build b/drivers/net/ice/base/meson.build
> index 3cf4ce05fa..41ed2d96c6 100644
> --- a/drivers/net/ice/base/meson.build
> +++ b/drivers/net/ice/base/meson.build
> @@ -26,6 +26,7 @@ sources = [
> 'ice_flg_rd.c',
> 'ice_xlt_kb.c',
> 'ice_parser_rt.c',
> + 'ice_ddp.c',
> ]
>
> error_cflags = [
> --
> 2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 21/70] net/ice/base: add E822 generic PCI device ID
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (19 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 20/70] net/ice/base: refactor DDP code Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 6:45 ` Yang, Qiming
2022-08-15 7:31 ` [PATCH v2 22/70] net/ice/base: support double VLAN rules Qi Zhang
` (49 subsequent siblings)
70 siblings, 1 reply; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang
The E822 has a generic PCI device ID that can be used in the PLDM
header when updating the device, so add it.
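As an illustration only (not part of this patch), a minimal sketch of how a
firmware-update path could treat the new generic ID; the helper name and the
fallback policy are assumptions made for this sketch:

/* Sketch only: helper name and policy are assumptions, not patch code. */
static bool ice_pldm_devid_matches(u16 img_dev_id, u16 hw_dev_id)
{
        /* An image built against the generic default ID is considered
         * applicable to any matching part; otherwise require an exact
         * device ID match.
         */
        if (img_dev_id == ICE_DEV_ID_E822_SI_DFLT)
                return true;

        return img_dev_id == hw_dev_id;
}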
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_devids.h | 3 ++-
drivers/net/ice/ice_ethdev.c | 1 +
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ice/base/ice_devids.h b/drivers/net/ice/base/ice_devids.h
index 96f2528c5e..96dbb92e0a 100644
--- a/drivers/net/ice/base/ice_devids.h
+++ b/drivers/net/ice/base/ice_devids.h
@@ -6,7 +6,6 @@
#define _ICE_DEVIDS_H_
/* Device IDs */
-/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
#define ICE_DEV_ID_E823L_SFP 0x124D
@@ -31,6 +30,8 @@
/* Intel(R) Ethernet Controller E810-XXV for SFP */
#define ICE_DEV_ID_E810_XXV_SFP 0x159B
/* Intel(R) Ethernet Connection E823-C for backplane */
+#define ICE_DEV_ID_E822_SI_DFLT 0x1888
+/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823C_BACKPLANE 0x188A
/* Intel(R) Ethernet Connection E823-C for QSFP */
#define ICE_DEV_ID_E823C_QSFP 0x188B
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 2e522376e3..551be3566f 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -187,6 +187,7 @@ static const struct rte_pci_id pci_id_ice_map[] = {
{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP) },
{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T) },
{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII) },
+ { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822_SI_DFLT) },
{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE) },
{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP) },
{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* RE: [PATCH v2 21/70] net/ice/base: add E822 generic PCI device ID
2022-08-15 7:31 ` [PATCH v2 21/70] net/ice/base: add E822 generic PCI device ID Qi Zhang
@ 2022-08-15 6:45 ` Yang, Qiming
0 siblings, 0 replies; 149+ messages in thread
From: Yang, Qiming @ 2022-08-15 6:45 UTC (permalink / raw)
To: Zhang, Qi Z; +Cc: dev
> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: Monday, August 15, 2022 3:31 PM
> To: Yang, Qiming <qiming.yang@intel.com>
> Cc: dev@dpdk.org; Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: [PATCH v2 21/70] net/ice/base: add E822 generic PCI device ID
>
> The E822 has a generic PCI device ID that can be used in the PLDM header
> when updating the device so add it.
>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> ---
> drivers/net/ice/base/ice_devids.h | 3 ++-
> drivers/net/ice/ice_ethdev.c | 1 +
> 2 files changed, 3 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/ice/base/ice_devids.h
> b/drivers/net/ice/base/ice_devids.h
> index 96f2528c5e..96dbb92e0a 100644
> --- a/drivers/net/ice/base/ice_devids.h
> +++ b/drivers/net/ice/base/ice_devids.h
> @@ -6,7 +6,6 @@
> #define _ICE_DEVIDS_H_
>
> /* Device IDs */
> -/* Intel(R) Ethernet Connection E823-L for backplane */
> #define ICE_DEV_ID_E823L_BACKPLANE 0x124C
> /* Intel(R) Ethernet Connection E823-L for SFP */
> #define ICE_DEV_ID_E823L_SFP 0x124D
> @@ -31,6 +30,8 @@
> /* Intel(R) Ethernet Controller E810-XXV for SFP */
> #define ICE_DEV_ID_E810_XXV_SFP 0x159B
> /* Intel(R) Ethernet Connection E823-C for backplane */
> +#define ICE_DEV_ID_E822_SI_DFLT 0x1888
> +/* Intel(R) Ethernet Connection E823-L for backplane */
> #define ICE_DEV_ID_E823C_BACKPLANE 0x188A
> /* Intel(R) Ethernet Connection E823-C for QSFP */
> #define ICE_DEV_ID_E823C_QSFP 0x188B
> diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index
> 2e522376e3..551be3566f 100644
> --- a/drivers/net/ice/ice_ethdev.c
> +++ b/drivers/net/ice/ice_ethdev.c
> @@ -187,6 +187,7 @@ static const struct rte_pci_id pci_id_ice_map[] = {
> { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP) },
> { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID,
> ICE_DEV_ID_E823C_10G_BASE_T) },
> { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID,
> ICE_DEV_ID_E823C_SGMII) },
> + { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID,
> ICE_DEV_ID_E822_SI_DFLT) },
> { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID,
> ICE_DEV_ID_E822C_BACKPLANE) },
> { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID,
> ICE_DEV_ID_E822C_QSFP) },
> { RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
> --
> 2.31.1
21/70-32/70
Acked-by: Qiming Yang <qiming.yang@intel.com>
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 22/70] net/ice/base: support double VLAN rules
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (20 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 21/70] net/ice/base: add E822 generic PCI device ID Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 23/70] net/ice/base: report NVM version numbers on mismatch Qi Zhang
` (48 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Wiktor Pilarczyk
Add support for double vlan rules with c-tag and s-tag in it.
Enable the caller to configure double vlan rules, and use extended
package capabilities to allow adding flow with double vlans.
The patch also re-order the code in ice_switch.c to align with
kernel driver.
Signed-off-by: Wiktor Pilarczyk <wiktor.pilarczyk@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_switch.c | 846 ++++++++++++++++--------------
1 file changed, 441 insertions(+), 405 deletions(-)
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index ad61dde397..e59d191c46 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -15,8 +15,8 @@
#define ICE_PPP_IPV6_PROTO_ID 0x0057
#define ICE_TCP_PROTO_ID 0x06
#define ICE_GTPU_PROFILE 24
-#define ICE_ETH_P_8021Q 0x8100
#define ICE_MPLS_ETHER_ID 0x8847
+#define ICE_ETH_P_8021Q 0x8100
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
* struct to configure any switch filter rules.
@@ -321,25 +321,6 @@ static const u8 dummy_tcp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* offset info for MAC + MPLS dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_mpls_packet_offsets[] = {
- { ICE_MAC_OFOS, 0 },
- { ICE_ETYPE_OL, 12 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-/* Dummy packet for MAC + MPLS */
-static const u8 dummy_mpls_packet[] = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x88, 0x47, /* ICE_ETYPE_OL 12 */
- 0x00, 0x00, 0x01, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
-};
-
/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
{ ICE_MAC_OFOS, 0 },
@@ -1115,63 +1096,198 @@ static const u8 dummy_ipv6_gtpu_ipv6_packet[] = {
0x00, 0x00,
};
-static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
+static const
+struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
{ ICE_MAC_OFOS, 0 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_UDP_OF, 34 },
- { ICE_GTP, 42 },
+ { ICE_GTP_NO_PAY, 42 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_gtp_packet[] = {
+static const
+struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP_NO_PAY, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_ipv6_gtp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+ 0x86, 0xdd,
- 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00,
+};
+
+static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_EX, 12 },
+ { ICE_VLAN_IN, 16 },
+ { ICE_ETYPE_OL, 20 },
+ { ICE_IPV4_OFOS, 22 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_qinq_ipv4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
+ 0x08, 0x00, /* ICE_ETYPE_OL 20 */
+
+ 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 22 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_qinq_ipv4_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_EX, 12 },
+ { ICE_VLAN_IN, 16 },
+ { ICE_ETYPE_OL, 20 },
+ { ICE_IPV4_OFOS, 22 },
+ { ICE_UDP_ILOS, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_qinq_ipv4_udp_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
+ 0x08, 0x00, /* ICE_ETYPE_OL 20 */
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
+ 0x00, 0x01, 0x00, 0x00,
0x00, 0x11, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
- 0x00, 0x1c, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
+ 0x00, 0x08, 0x00, 0x00,
- 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_qinq_ipv4_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_EX, 12 },
+ { ICE_VLAN_IN, 16 },
+ { ICE_ETYPE_OL, 20 },
+ { ICE_IPV4_OFOS, 22 },
+ { ICE_TCP_IL, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_qinq_ipv4_tcp_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x85,
- 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
+ 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
+ 0x08, 0x00, /* ICE_ETYPE_OL 20 */
+
+ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const struct ice_dummy_pkt_offsets dummy_ipv4_gtp_no_pay_packet_offsets[] = {
+static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
{ ICE_MAC_OFOS, 0 },
- { ICE_IPV4_OFOS, 14 },
- { ICE_UDP_OF, 34 },
- { ICE_GTP_NO_PAY, 42 },
+ { ICE_VLAN_EX, 12 },
+ { ICE_VLAN_IN, 16 },
+ { ICE_ETYPE_OL, 20 },
+ { ICE_IPV6_OFOS, 22 },
{ ICE_PROTOCOL_LAST, 0 },
};
+static const u8 dummy_qinq_ipv6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
+ 0x86, 0xDD, /* ICE_ETYPE_OL 20 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
+ 0x00, 0x00, 0x3b, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
static const
-struct ice_dummy_pkt_offsets dummy_ipv6_gtp_no_pay_packet_offsets[] = {
+struct ice_dummy_pkt_offsets dummy_qinq_ipv6_udp_packet_offsets[] = {
{ ICE_MAC_OFOS, 0 },
- { ICE_IPV6_OFOS, 14 },
- { ICE_UDP_OF, 54 },
- { ICE_GTP_NO_PAY, 62 },
+ { ICE_VLAN_EX, 12 },
+ { ICE_VLAN_IN, 16 },
+ { ICE_ETYPE_OL, 20 },
+ { ICE_IPV6_OFOS, 22 },
+ { ICE_UDP_ILOS, 62 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_ipv6_gtp_packet[] = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+static const u8 dummy_qinq_ipv6_udp_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x86, 0xdd,
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
- 0x00, 0x6c, 0x11, 0x00, /* Next header UDP*/
+ 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
+ 0x86, 0xDD, /* ICE_ETYPE_OL 20 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
+ 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1181,13 +1297,100 @@ static const u8 dummy_ipv6_gtp_packet[] = {
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const
+struct ice_dummy_pkt_offsets dummy_qinq_ipv6_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_EX, 12 },
+ { ICE_VLAN_IN, 16 },
+ { ICE_ETYPE_OL, 20 },
+ { ICE_IPV6_OFOS, 22 },
+ { ICE_TCP_IL, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_qinq_ipv6_tcp_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
+ 0x86, 0xDD, /* ICE_ETYPE_OL 20 */
- 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
+ 0x00, 0x14, 0x06, 0x00, /* Next header TCP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* offset info for MAC + MPLS dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_mpls_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* Dummy packet for MAC + MPLS */
+static const u8 dummy_mpls_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x47, /* ICE_ETYPE_OL 12 */
+ 0x00, 0x00, 0x01, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const struct ice_dummy_pkt_offsets dummy_udp_gtp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_udp_gtp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x30, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* ICE_UDP_OF 34 */
+ 0x00, 0x1c, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x0c, /* ICE_GTP 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
+ 0x00, 0x00, 0x00, 0x00,
};
static const struct ice_dummy_pkt_offsets dummy_pppoe_packet_offsets[] = {
@@ -1511,265 +1714,78 @@ static const u8 dummy_ipv4_ah_pkt[] = {
static const struct ice_dummy_pkt_offsets dummy_ipv6_ah_packet_offsets[] = {
{ ICE_MAC_OFOS, 0 },
- { ICE_IPV6_OFOS, 14 },
- { ICE_AH, 54 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-static const u8 dummy_ipv6_ah_pkt[] = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x86, 0xDD,
-
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
- 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
-};
-
-static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
- { ICE_MAC_OFOS, 0 },
- { ICE_IPV4_OFOS, 14 },
- { ICE_UDP_ILOS, 34 },
- { ICE_NAT_T, 42 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-static const u8 dummy_ipv4_nat_pkt[] = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
-
- 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
- 0x00, 0x00, 0x40, 0x00,
- 0x40, 0x11, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
-};
-
-static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
- { ICE_MAC_OFOS, 0 },
- { ICE_IPV6_OFOS, 14 },
- { ICE_UDP_ILOS, 54 },
- { ICE_NAT_T, 62 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-static const u8 dummy_ipv6_nat_pkt[] = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x86, 0xDD,
-
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
- 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
-
-};
-
-static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
- { ICE_MAC_OFOS, 0 },
- { ICE_IPV4_OFOS, 14 },
- { ICE_L2TPV3, 34 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-static const u8 dummy_ipv4_l2tpv3_pkt[] = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
-
- 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
- 0x00, 0x00, 0x40, 0x00,
- 0x40, 0x73, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
-};
-
-static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
- { ICE_MAC_OFOS, 0 },
- { ICE_IPV6_OFOS, 14 },
- { ICE_L2TPV3, 54 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-static const u8 dummy_ipv6_l2tpv3_pkt[] = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x86, 0xDD,
-
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
- 0x00, 0x0c, 0x73, 0x40,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
-};
-
-static const struct ice_dummy_pkt_offsets dummy_qinq_ipv4_packet_offsets[] = {
- { ICE_MAC_OFOS, 0 },
- { ICE_VLAN_EX, 12 },
- { ICE_VLAN_IN, 16 },
- { ICE_ETYPE_OL, 20 },
- { ICE_IPV4_OFOS, 22 },
- { ICE_PROTOCOL_LAST, 0 },
-};
-
-static const u8 dummy_qinq_ipv4_pkt[] = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
- 0x08, 0x00, /* ICE_ETYPE_OL 20 */
-
- 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_OFOS 22 */
- 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
-};
-
-static const
-struct ice_dummy_pkt_offsets dummy_qinq_ipv4_udp_packet_offsets[] = {
- { ICE_MAC_OFOS, 0 },
- { ICE_VLAN_EX, 12 },
- { ICE_VLAN_IN, 16 },
- { ICE_ETYPE_OL, 20 },
- { ICE_IPV4_OFOS, 22 },
- { ICE_UDP_ILOS, 42 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_AH, 54 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_qinq_ipv4_udp_pkt[] = {
+static const u8 dummy_ipv6_ah_pkt[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xDD,
- 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
- 0x08, 0x00, /* ICE_ETYPE_OL 20 */
-
- 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
- 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x11, 0x00, 0x00,
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x0c, 0x33, 0x00, /* Next header AH */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
- 0x00, 0x08, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+ 0x00, 0x00, 0x00, 0x00, /* ICE_AH 54 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_qinq_ipv4_tcp_packet_offsets[] = {
+static const struct ice_dummy_pkt_offsets dummy_ipv4_nat_packet_offsets[] = {
{ ICE_MAC_OFOS, 0 },
- { ICE_VLAN_EX, 12 },
- { ICE_VLAN_IN, 16 },
- { ICE_ETYPE_OL, 20 },
- { ICE_IPV4_OFOS, 22 },
- { ICE_TCP_IL, 42 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_ILOS, 34 },
+ { ICE_NAT_T, 42 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_qinq_ipv4_tcp_pkt[] = {
+static const u8 dummy_ipv4_nat_pkt[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
- 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
- 0x08, 0x00, /* ICE_ETYPE_OL 20 */
-
- 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
- 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x06, 0x00, 0x00,
+ 0x45, 0x00, 0x00, 0x24, /* ICE_IPV4_IL 14 */
+ 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x11, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
+ 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 34 */
0x00, 0x00, 0x00, 0x00,
+
0x00, 0x00, 0x00, 0x00,
- 0x50, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
};
-static const struct ice_dummy_pkt_offsets dummy_qinq_ipv6_packet_offsets[] = {
+static const struct ice_dummy_pkt_offsets dummy_ipv6_nat_packet_offsets[] = {
{ ICE_MAC_OFOS, 0 },
- { ICE_VLAN_EX, 12 },
- { ICE_VLAN_IN, 16 },
- { ICE_ETYPE_OL, 20 },
- { ICE_IPV6_OFOS, 22 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_ILOS, 54 },
+ { ICE_NAT_T, 62 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_qinq_ipv6_pkt[] = {
+static const u8 dummy_ipv6_nat_pkt[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xDD,
- 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
- 0x86, 0xDD, /* ICE_ETYPE_OL 20 */
-
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
- 0x00, 0x00, 0x3b, 0x00,
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x10, 0x11, 0x00, /* Next header NAT_T */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1779,68 +1795,55 @@ static const u8 dummy_qinq_ipv6_pkt[] = {
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+ 0x00, 0x00, 0x11, 0x94, /* ICE_NAT_T 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+
};
-static const
-struct ice_dummy_pkt_offsets dummy_qinq_ipv6_udp_packet_offsets[] = {
+static const struct ice_dummy_pkt_offsets dummy_ipv4_l2tpv3_packet_offsets[] = {
{ ICE_MAC_OFOS, 0 },
- { ICE_VLAN_EX, 12 },
- { ICE_VLAN_IN, 16 },
- { ICE_ETYPE_OL, 20 },
- { ICE_IPV6_OFOS, 22 },
- { ICE_UDP_ILOS, 62 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_L2TPV3, 34 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_qinq_ipv6_udp_pkt[] = {
+static const u8 dummy_ipv4_l2tpv3_pkt[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
- 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
- 0x86, 0xDD, /* ICE_ETYPE_OL 20 */
-
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
- 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
+ 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
+ 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x73, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
- 0x00, 0x08, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
};
-static const
-struct ice_dummy_pkt_offsets dummy_qinq_ipv6_tcp_packet_offsets[] = {
+static const struct ice_dummy_pkt_offsets dummy_ipv6_l2tpv3_packet_offsets[] = {
{ ICE_MAC_OFOS, 0 },
- { ICE_VLAN_EX, 12 },
- { ICE_VLAN_IN, 16 },
- { ICE_ETYPE_OL, 20 },
- { ICE_IPV6_OFOS, 22 },
- { ICE_TCP_IL, 62 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_L2TPV3, 54 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_qinq_ipv6_tcp_pkt[] = {
+static const u8 dummy_ipv6_l2tpv3_pkt[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xDD,
- 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
- 0x86, 0xDD, /* ICE_ETYPE_OL 20 */
-
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
- 0x00, 0x14, 0x06, 0x00, /* Next header TCP */
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
+ 0x00, 0x0c, 0x73, 0x40,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -1850,13 +1853,10 @@ static const u8 dummy_qinq_ipv6_tcp_pkt[] = {
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
- 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
0x00, 0x00, 0x00, 0x00,
- 0x50, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
-
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
};
static const struct ice_dummy_pkt_offsets dummy_qinq_pppoe_packet_offsets[] = {
@@ -5340,6 +5340,83 @@ ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
}
+/**
+ * ice_get_lg_act_aqc_res_type - get resource type for a large action
+ * @res_type: resource type to be filled in case of function success
+ * @num_acts: number of actions to hold with a large action entry
+ *
+ * Get resource type for a large action depending on the number
+ * of single actions that it contains.
+ */
+static enum ice_status
+ice_get_lg_act_aqc_res_type(u16 *res_type, int num_acts)
+{
+ if (!res_type)
+ return ICE_ERR_BAD_PTR;
+
+ /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
+ * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
+ * If num_acts is greater than 2, then use
+ * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
+ * The num_acts cannot be equal to 0 or greater than 4.
+ */
+ switch (num_acts) {
+ case 1:
+ *res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_1;
+ break;
+ case 2:
+ *res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_2;
+ break;
+ case 3:
+ case 4:
+ *res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_4;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_alloc_res_lg_act - add large action resource
+ * @hw: pointer to the hardware structure
+ * @l_id: large action ID to fill it in
+ * @num_acts: number of actions to hold with a large action entry
+ */
+static enum ice_status
+ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
+{
+ struct ice_aqc_alloc_free_res_elem *sw_buf;
+ enum ice_status status;
+ u16 buf_len, res_type;
+
+ if (!l_id)
+ return ICE_ERR_BAD_PTR;
+
+ status = ice_get_lg_act_aqc_res_type(&res_type, num_acts);
+ if (status)
+ return status;
+
+ /* Allocate resource for large action */
+ buf_len = ice_struct_size(sw_buf, elem, 1);
+ sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
+ if (!sw_buf)
+ return ICE_ERR_NO_MEMORY;
+
+ sw_buf->res_type = CPU_TO_LE16(res_type);
+ sw_buf->num_elems = CPU_TO_LE16(1);
+
+ status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (!status)
+ *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
+
+ ice_free(hw, sw_buf);
+
+ return status;
+}
+
/**
* ice_rem_sw_rule_info
* @hw: pointer to the hardware structure
@@ -6419,53 +6496,6 @@ enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
counter_id);
}
-/**
- * ice_alloc_res_lg_act - add large action resource
- * @hw: pointer to the hardware structure
- * @l_id: large action ID to fill it in
- * @num_acts: number of actions to hold with a large action entry
- */
-static enum ice_status
-ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
-{
- struct ice_aqc_alloc_free_res_elem *sw_buf;
- enum ice_status status;
- u16 buf_len;
-
- if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
- return ICE_ERR_PARAM;
-
- /* Allocate resource for large action */
- buf_len = ice_struct_size(sw_buf, elem, 1);
- sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
- if (!sw_buf)
- return ICE_ERR_NO_MEMORY;
-
- sw_buf->num_elems = CPU_TO_LE16(1);
-
- /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
- * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
- * If num_acts is greater than 2, then use
- * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
- * The num_acts cannot exceed 4. This was ensured at the
- * beginning of the function.
- */
- if (num_acts == 1)
- sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
- else if (num_acts == 2)
- sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
- else
- sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
-
- status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
- ice_aqc_opc_alloc_res, NULL);
- if (!status)
- *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
-
- ice_free(hw, sw_buf);
- return status;
-}
-
/**
* ice_add_mac_with_sw_marker - add filter with sw marker
* @hw: pointer to the hardware structure
@@ -6690,13 +6720,13 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
{ ICE_VXLAN_GPE, { 8, 10, 12, 14 } },
{ ICE_NVGRE, { 0, 2, 4, 6 } },
{ ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
+ { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
{ ICE_PPPOE, { 0, 2, 4, 6 } },
{ ICE_PFCP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
{ ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
{ ICE_ESP, { 0, 2, 4, 6 } },
{ ICE_AH, { 0, 2, 4, 6, 8, 10 } },
{ ICE_NAT_T, { 8, 10, 12, 14 } },
- { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
{ ICE_VLAN_EX, { 2, 0 } },
{ ICE_VLAN_IN, { 2, 0 } },
};
@@ -6725,13 +6755,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_VXLAN_GPE, ICE_UDP_OF_HW },
{ ICE_NVGRE, ICE_GRE_OF_HW },
{ ICE_GTP, ICE_UDP_OF_HW },
+ { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
{ ICE_PPPOE, ICE_PPPOE_HW },
{ ICE_PFCP, ICE_UDP_ILOS_HW },
{ ICE_L2TPV3, ICE_L2TPV3_HW },
{ ICE_ESP, ICE_ESP_HW },
{ ICE_AH, ICE_AH_HW },
{ ICE_NAT_T, ICE_UDP_ILOS_HW },
- { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
{ ICE_VLAN_EX, ICE_VLAN_OF_HW },
{ ICE_VLAN_IN, ICE_VLAN_OL_HW },
{ ICE_FLG_DIR, ICE_META_DATA_ID_HW},
@@ -7969,7 +7999,8 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
u16 *pkt_len,
const struct ice_dummy_pkt_offsets **offsets)
{
- bool tcp = false, udp = false, ipv6 = false, vlan = false;
+ bool tcp = false, udp = false, outer_ipv6 = false, vlan = false;
+ bool cvlan = false;
bool gre = false, mpls = false;
u16 i;
@@ -7979,15 +8010,19 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
else if (lkups[i].type == ICE_TCP_IL)
tcp = true;
else if (lkups[i].type == ICE_IPV6_OFOS)
- ipv6 = true;
- else if (lkups[i].type == ICE_VLAN_OFOS)
+ outer_ipv6 = true;
+ else if (lkups[i].type == ICE_VLAN_OFOS ||
+ lkups[i].type == ICE_VLAN_EX)
vlan = true;
+
+ else if (lkups[i].type == ICE_VLAN_IN)
+ cvlan = true;
else if (lkups[i].type == ICE_ETYPE_OL &&
lkups[i].h_u.ethertype.ethtype_id ==
CPU_TO_BE16(ICE_IPV6_ETHER_ID) &&
lkups[i].m_u.ethertype.ethtype_id ==
CPU_TO_BE16(0xFFFF))
- ipv6 = true;
+ outer_ipv6 = true;
else if (lkups[i].type == ICE_IPV4_OFOS &&
lkups[i].h_u.ipv4_hdr.protocol ==
ICE_IPV4_NVGRE_PROTO_ID &&
@@ -7999,7 +8034,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
CPU_TO_BE16(ICE_PPP_IPV6_PROTO_ID) &&
lkups[i].m_u.pppoe_hdr.ppp_prot_id ==
0xFFFF)
- ipv6 = true;
+ outer_ipv6 = true;
else if (lkups[i].type == ICE_IPV4_IL &&
lkups[i].h_u.ipv4_hdr.protocol ==
ICE_TCP_PROTO_ID &&
@@ -8013,46 +8048,47 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
mpls = true;
}
- if ((tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
- tun_type == ICE_NON_TUN_QINQ) && ipv6) {
- if (tcp) {
- *pkt = dummy_qinq_ipv6_tcp_pkt;
- *pkt_len = sizeof(dummy_qinq_ipv6_tcp_pkt);
- *offsets = dummy_qinq_ipv6_tcp_packet_offsets;
- return;
- }
+ if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
+ tun_type == ICE_NON_TUN_QINQ) {
+ if (outer_ipv6) {
+ if (tcp) {
+ *pkt = dummy_qinq_ipv6_tcp_pkt;
+ *pkt_len = sizeof(dummy_qinq_ipv6_tcp_pkt);
+ *offsets = dummy_qinq_ipv6_tcp_packet_offsets;
+ return;
+ }
- if (udp) {
- *pkt = dummy_qinq_ipv6_udp_pkt;
- *pkt_len = sizeof(dummy_qinq_ipv6_udp_pkt);
- *offsets = dummy_qinq_ipv6_udp_packet_offsets;
- return;
- }
+ if (udp) {
+ *pkt = dummy_qinq_ipv6_udp_pkt;
+ *pkt_len = sizeof(dummy_qinq_ipv6_udp_pkt);
+ *offsets = dummy_qinq_ipv6_udp_packet_offsets;
+ return;
+ }
- *pkt = dummy_qinq_ipv6_pkt;
- *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
- *offsets = dummy_qinq_ipv6_packet_offsets;
- return;
- } else if (tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ ||
- tun_type == ICE_NON_TUN_QINQ) {
- if (tcp) {
- *pkt = dummy_qinq_ipv4_tcp_pkt;
- *pkt_len = sizeof(dummy_qinq_ipv4_tcp_pkt);
- *offsets = dummy_qinq_ipv4_tcp_packet_offsets;
+ *pkt = dummy_qinq_ipv6_pkt;
+ *pkt_len = sizeof(dummy_qinq_ipv6_pkt);
+ *offsets = dummy_qinq_ipv6_packet_offsets;
return;
- }
+ } else {
+ if (tcp) {
+ *pkt = dummy_qinq_ipv4_tcp_pkt;
+ *pkt_len = sizeof(dummy_qinq_ipv4_tcp_pkt);
+ *offsets = dummy_qinq_ipv4_tcp_packet_offsets;
+ return;
+ }
- if (udp) {
- *pkt = dummy_qinq_ipv4_udp_pkt;
- *pkt_len = sizeof(dummy_qinq_ipv4_udp_pkt);
- *offsets = dummy_qinq_ipv4_udp_packet_offsets;
+ if (udp) {
+ *pkt = dummy_qinq_ipv4_udp_pkt;
+ *pkt_len = sizeof(dummy_qinq_ipv4_udp_pkt);
+ *offsets = dummy_qinq_ipv4_udp_packet_offsets;
+ return;
+ }
+
+ *pkt = dummy_qinq_ipv4_pkt;
+ *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
+ *offsets = dummy_qinq_ipv4_packet_offsets;
return;
}
-
- *pkt = dummy_qinq_ipv4_pkt;
- *pkt_len = sizeof(dummy_qinq_ipv4_pkt);
- *offsets = dummy_qinq_ipv4_packet_offsets;
- return;
}
if (tun_type == ICE_SW_TUN_PPPOE_IPV6_QINQ) {
@@ -8065,7 +8101,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
*pkt_len = sizeof(dummy_qinq_pppoe_ipv4_pkt);
*offsets = dummy_qinq_pppoe_ipv4_packet_offsets;
return;
- } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ && ipv6) {
+ } else if (tun_type == ICE_SW_TUN_PPPOE_QINQ && outer_ipv6) {
*pkt = dummy_qinq_pppoe_ipv6_packet;
*pkt_len = sizeof(dummy_qinq_pppoe_ipv6_packet);
*offsets = dummy_qinq_pppoe_packet_offsets;
@@ -8249,7 +8285,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
return;
}
- if (tun_type == ICE_SW_TUN_PPPOE && ipv6) {
+ if (tun_type == ICE_SW_TUN_PPPOE && outer_ipv6) {
*pkt = dummy_pppoe_ipv6_packet;
*pkt_len = sizeof(dummy_pppoe_ipv6_packet);
*offsets = dummy_pppoe_packet_offsets;
@@ -8370,7 +8406,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
return;
}
- if (udp && !ipv6) {
+ if (udp && !outer_ipv6) {
if (vlan) {
*pkt = dummy_vlan_udp_packet;
*pkt_len = sizeof(dummy_vlan_udp_packet);
@@ -8381,7 +8417,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
*pkt_len = sizeof(dummy_udp_packet);
*offsets = dummy_udp_packet_offsets;
return;
- } else if (udp && ipv6) {
+ } else if (udp && outer_ipv6) {
if (vlan) {
*pkt = dummy_vlan_udp_ipv6_packet;
*pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
@@ -8392,7 +8428,7 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
*pkt_len = sizeof(dummy_udp_ipv6_packet);
*offsets = dummy_udp_ipv6_packet_offsets;
return;
- } else if ((tcp && ipv6) || ipv6) {
+ } else if ((tcp && outer_ipv6) || outer_ipv6) {
if (vlan) {
*pkt = dummy_vlan_tcp_ipv6_packet;
*pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 23/70] net/ice/base: report NVM version numbers on mismatch
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (21 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 22/70] net/ice/base: support double VLAN rules Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 24/70] net/ice/base: create duplicate detection for ACL rules Qi Zhang
` (47 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Sergey Temerkhanov
Report NVM version numbers (both detected and expected) when a
mismatch between the driver and the firmware is detected.
This provides more useful information about which NVM version the
driver expects, instead of having to look it up in the code.
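For example (illustrative version numbers only), the older-NVM warning now
reads "The driver for the device detected an older version (1.5) of the NVM
image than expected (1.7). Please update the NVM image." rather than the
previous message without any version numbers.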
Signed-off-by: Sergey Temerkhanov <sergey.temerkhanov@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_controlq.c | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ice/base/ice_controlq.c b/drivers/net/ice/base/ice_controlq.c
index cdd067ce7f..d83d0d76d0 100644
--- a/drivers/net/ice/base/ice_controlq.c
+++ b/drivers/net/ice/base/ice_controlq.c
@@ -495,12 +495,18 @@ static bool ice_aq_ver_check(struct ice_hw *hw)
return false;
} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
- ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ ice_info(hw, "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
- ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
} else {
/* Major API version is older than expected, log a warning */
- ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
+ hw->api_maj_ver, hw->api_min_ver,
+ EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
}
return true;
}
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 24/70] net/ice/base: create duplicate detection for ACL rules
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (22 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 23/70] net/ice/base: report NVM version numbers on mismatch Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 25/70] net/ice/base: fix incorrect function descriptions for parser Qi Zhang
` (46 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Michal Wilczynski
Currently there is no check for adding duplicate ACL rules; this
creates subtle bugs, for example the inability to remove filters.
Add such a check and refactor a redundant function.
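As a hedged sketch (not part of this patch), a caller could use the
duplicate check before programming a new filter roughly as follows; the
wrapper name and the ICE_ERR_ALREADY_EXISTS return value are assumptions:

/* Sketch only: wrapper name and error code are assumptions. */
static enum ice_status
ice_fdir_add_fltr_checked(struct ice_hw *hw, struct ice_fdir_fltr *input)
{
        if (ice_fdir_is_dup_fltr(hw, input))
                return ICE_ERR_ALREADY_EXISTS;

        /* ... program the filter and link it into hw->fdir_list_head ... */
        return ICE_SUCCESS;
}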
Signed-off-by: Michal Wilczynski <michal.wilczynski@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_fdir.c | 99 ++++++++++++---------------------
drivers/net/ice/base/ice_fdir.h | 5 ++
2 files changed, 42 insertions(+), 62 deletions(-)
diff --git a/drivers/net/ice/base/ice_fdir.c b/drivers/net/ice/base/ice_fdir.c
index ae76361102..6bbab0c843 100644
--- a/drivers/net/ice/base/ice_fdir.c
+++ b/drivers/net/ice/base/ice_fdir.c
@@ -4204,70 +4204,56 @@ ice_fdir_update_cntrs(struct ice_hw *hw, enum ice_fltr_ptype flow,
}
/**
- * ice_cmp_ipv6_addr - compare 2 IP v6 addresses
- * @a: IP v6 address
- * @b: IP v6 address
+ * ice_fdir_comp_rules_basic - compare 2 filters
+ * @a: a Flow Director filter data structure
+ * @b: a Flow Director filter data structure
*
- * Returns 0 on equal, returns non-0 if different
+ * Returns true if the filters match
*/
-static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b)
+bool
+ice_fdir_comp_rules_basic(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b)
{
- return memcmp(a, b, 4 * sizeof(__be32));
+ if (a->flow_type != b->flow_type)
+ return false;
+ if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
+ return false;
+ if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
+ return false;
+
+ return true;
}
/**
- * ice_fdir_comp_rules - compare 2 filters
+ * ice_fdir_comp_rules_extended - compare 2 filters
* @a: a Flow Director filter data structure
* @b: a Flow Director filter data structure
- * @v6: bool true if v6 filter
*
* Returns true if the filters match
*/
-static bool
-ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6)
+bool
+ice_fdir_comp_rules_extended(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b)
{
- enum ice_fltr_ptype flow_type = a->flow_type;
+ if (!ice_fdir_comp_rules_basic(a, b))
+ return false;
- /* The calling function already checks that the two filters have the
- * same flow_type.
- */
- if (!v6) {
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) {
- if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
- a->ip.v4.src_ip == b->ip.v4.src_ip &&
- a->ip.v4.dst_port == b->ip.v4.dst_port &&
- a->ip.v4.src_port == b->ip.v4.src_port)
- return true;
- } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
- if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
- a->ip.v4.src_ip == b->ip.v4.src_ip &&
- a->ip.v4.l4_header == b->ip.v4.l4_header &&
- a->ip.v4.proto == b->ip.v4.proto &&
- a->ip.v4.ip_ver == b->ip.v4.ip_ver &&
- a->ip.v4.tos == b->ip.v4.tos)
- return true;
- }
- } else {
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) {
- if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
- a->ip.v6.src_port == b->ip.v6.src_port &&
- !ice_cmp_ipv6_addr(a->ip.v6.dst_ip,
- b->ip.v6.dst_ip) &&
- !ice_cmp_ipv6_addr(a->ip.v6.src_ip,
- b->ip.v6.src_ip))
- return true;
- } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
- if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
- a->ip.v6.src_port == b->ip.v6.src_port)
- return true;
- }
- }
+ if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
+ return false;
+ if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
+ return false;
+ if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
+ return false;
+ if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
+ return false;
+ if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
+ return false;
+ if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
+ return false;
+ if (memcmp(&a->ecpri_data, &b->ecpri_data, sizeof(a->ecpri_data)))
+ return false;
+ if (memcmp(&a->ecpri_mask, &b->ecpri_mask, sizeof(a->ecpri_mask)))
+ return false;
- return false;
+ return true;
}
/**
@@ -4284,19 +4270,8 @@ bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input)
LIST_FOR_EACH_ENTRY(rule, &hw->fdir_list_head, ice_fdir_fltr,
fltr_node) {
- enum ice_fltr_ptype flow_type;
+ ret = ice_fdir_comp_rules_basic(rule, input);
- if (rule->flow_type != input->flow_type)
- continue;
-
- flow_type = input->flow_type;
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
- ret = ice_fdir_comp_rules(rule, input, false);
- else
- ret = ice_fdir_comp_rules(rule, input, true);
if (ret) {
if (rule->fltr_id == input->fltr_id &&
rule->q_index != input->q_index)
diff --git a/drivers/net/ice/base/ice_fdir.h b/drivers/net/ice/base/ice_fdir.h
index b6325a3b1b..008636072a 100644
--- a/drivers/net/ice/base/ice_fdir.h
+++ b/drivers/net/ice/base/ice_fdir.h
@@ -294,6 +294,11 @@ struct ice_fdir_base_pkt {
const u8 *tun_pkt;
};
+bool
+ice_fdir_comp_rules_basic(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b);
+bool
+ice_fdir_comp_rules_extended(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b);
+
enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
enum ice_status
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 25/70] net/ice/base: fix incorrect function descriptions for parser
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (23 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 24/70] net/ice/base: create duplicate detection for ACL rules Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 26/70] net/ice/base: fix endian format Qi Zhang
` (45 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, stable, Junfeng Guo
Some function descriptions for the parser do not match the actual
function parameters; fix them in this patch.
Fixes: 7fe2d98070e0 ("net/ice/base: add parser create and destroy skeleton")
Fixes: 1792942b2df6 ("net/ice/base: init boost TCAM table for parser")
Fixes: f787952d13d2 ("net/ice/base: init flag redirect table for parser")
Fixes: b3e73a812f98 ("net/ice/base: init IMEM table for parser")
Fixes: 2f7a1864cc19 ("net/ice/base: init metainit table for parser")
Fixes: 90bbd7d9545f ("net/ice/base: init marker group table for parser")
Fixes: c55b1ba93f07 ("net/ice/base: init parse graph CAM table for parser")
Fixes: 7b61be517fd5 ("net/ice/base: init protocol group table for parser")
Fixes: 111871087cdf ("net/ice/base: init ptype marker TCAM table for parser")
Fixes: 0cbacf60dce7 ("net/ice/base: init XLT key builder for parser")
Cc: stable@dpdk.org
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_bst_tcam.c | 6 +++---
drivers/net/ice/base/ice_flg_rd.c | 4 ++--
drivers/net/ice/base/ice_imem.c | 4 ++--
drivers/net/ice/base/ice_metainit.c | 4 ++--
drivers/net/ice/base/ice_mk_grp.c | 4 ++--
drivers/net/ice/base/ice_parser.c | 7 ++++---
drivers/net/ice/base/ice_pg_cam.c | 12 ++++++------
drivers/net/ice/base/ice_proto_grp.c | 4 ++--
drivers/net/ice/base/ice_ptype_mk.c | 4 ++--
drivers/net/ice/base/ice_xlt_kb.c | 10 +++++-----
10 files changed, 30 insertions(+), 29 deletions(-)
diff --git a/drivers/net/ice/base/ice_bst_tcam.c b/drivers/net/ice/base/ice_bst_tcam.c
index 306f62db2a..74a2de869e 100644
--- a/drivers/net/ice/base/ice_bst_tcam.c
+++ b/drivers/net/ice/base/ice_bst_tcam.c
@@ -53,7 +53,7 @@ static void _bst_alu_dump(struct ice_hw *hw, struct ice_alu *alu, int index)
/**
* ice_bst_tcam_dump - dump a boost tcam info
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
* @item: boost tcam to dump
*/
void ice_bst_tcam_dump(struct ice_hw *hw, struct ice_bst_tcam_item *item)
@@ -205,7 +205,7 @@ static void _bst_parse_item(struct ice_hw *hw, u16 idx, void *item,
/**
* ice_bst_tcam_table_get - create a boost tcam table
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_bst_tcam_item *ice_bst_tcam_table_get(struct ice_hw *hw)
{
@@ -228,7 +228,7 @@ static void _parse_lbl_item(struct ice_hw *hw, u16 idx, void *item,
/**
* ice_bst_lbl_table_get - create a boost label table
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_lbl_item *ice_bst_lbl_table_get(struct ice_hw *hw)
{
diff --git a/drivers/net/ice/base/ice_flg_rd.c b/drivers/net/ice/base/ice_flg_rd.c
index 833986cac3..80d3b51ad6 100644
--- a/drivers/net/ice/base/ice_flg_rd.c
+++ b/drivers/net/ice/base/ice_flg_rd.c
@@ -9,7 +9,7 @@
/**
* ice_flg_rd_dump - dump a flag redirect item info
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
* @item: flag redirect item to dump
*/
void ice_flg_rd_dump(struct ice_hw *hw, struct ice_flg_rd_item *item)
@@ -40,7 +40,7 @@ static void _flg_rd_parse_item(struct ice_hw *hw, u16 idx, void *item,
/**
* ice_flg_rd_table_get - create a flag redirect table
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_flg_rd_item *ice_flg_rd_table_get(struct ice_hw *hw)
{
diff --git a/drivers/net/ice/base/ice_imem.c b/drivers/net/ice/base/ice_imem.c
index 2136e0393b..9a76d21ce5 100644
--- a/drivers/net/ice/base/ice_imem.c
+++ b/drivers/net/ice/base/ice_imem.c
@@ -69,7 +69,7 @@ static void _imem_alu_dump(struct ice_hw *hw, struct ice_alu *alu, int index)
/**
* ice_imem_dump - dump an imem item info
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
* @item: imem item to dump
*/
void ice_imem_dump(struct ice_hw *hw, struct ice_imem_item *item)
@@ -231,7 +231,7 @@ static void _imem_parse_item(struct ice_hw *hw, u16 idx, void *item,
/**
* ice_imem_table_get - create an imem table
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_imem_item *ice_imem_table_get(struct ice_hw *hw)
{
diff --git a/drivers/net/ice/base/ice_metainit.c b/drivers/net/ice/base/ice_metainit.c
index 3f9e5d6833..a899125b37 100644
--- a/drivers/net/ice/base/ice_metainit.c
+++ b/drivers/net/ice/base/ice_metainit.c
@@ -9,7 +9,7 @@
/**
* ice_metainit_dump - dump an metainit item info
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
* @item: metainit item to dump
*/
void ice_metainit_dump(struct ice_hw *hw, struct ice_metainit_item *item)
@@ -130,7 +130,7 @@ static void _metainit_parse_item(struct ice_hw *hw, u16 idx, void *item,
/**
* ice_metainit_table_get - create a metainit table
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_metainit_item *ice_metainit_table_get(struct ice_hw *hw)
{
diff --git a/drivers/net/ice/base/ice_mk_grp.c b/drivers/net/ice/base/ice_mk_grp.c
index 4e9ab5c13a..814001c49e 100644
--- a/drivers/net/ice/base/ice_mk_grp.c
+++ b/drivers/net/ice/base/ice_mk_grp.c
@@ -10,7 +10,7 @@
/**
* ice_mk_grp_dump - dump an marker group item info
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
* @item: marker group item to dump
*/
void ice_mk_grp_dump(struct ice_hw *hw, struct ice_mk_grp_item *item)
@@ -42,7 +42,7 @@ static void _mk_grp_parse_item(struct ice_hw *hw, u16 idx, void *item,
/**
* ice_mk_grp_table_get - create a marker group table
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw)
{
diff --git a/drivers/net/ice/base/ice_parser.c b/drivers/net/ice/base/ice_parser.c
index 6529f5d635..5a461d83be 100644
--- a/drivers/net/ice/base/ice_parser.c
+++ b/drivers/net/ice/base/ice_parser.c
@@ -106,7 +106,7 @@ void *ice_parser_sect_item_get(u32 sect_type, void *section,
* @item_size: item size in byte
* @length: number of items in the table to create
* @item_get: the function will be parsed to ice_pkg_enum_entry
- * @parser_item: the function to parse the item
+ * @parse_item: the function to parse the item
* @no_offset: ignore header offset, calculate index from 0
*/
void *ice_parser_create_table(struct ice_hw *hw, u32 sect_type,
@@ -359,6 +359,7 @@ static void _bst_vm_set(struct ice_parser *psr, const char *prefix, bool on)
/**
* ice_parser_dvm_set - configure double vlan mode for parser
* @psr: pointer to a parser instance
+ * @on: true to turn on; false to turn off
*/
void ice_parser_dvm_set(struct ice_parser *psr, bool on)
{
@@ -478,8 +479,8 @@ static bool _nearest_proto_id(struct ice_parser_result *rslt, u16 offset,
* ice_parser_profile_init - initialize a FXP profile base on parser result
* @rslt: a instance of a parser result
* @pkt_buf: packet data buffer
- * @pkt_msk: packet mask buffer
- * @pkt_len: packet length
+ * @msk_buf: packet mask buffer
+ * @buf_len: packet length
* @blk: FXP pipeline stage
* @prefix_match: match protocol stack exactly or only prefix
* @prof: input/output parameter to save the profile
diff --git a/drivers/net/ice/base/ice_pg_cam.c b/drivers/net/ice/base/ice_pg_cam.c
index fe461ad849..73f7c34ffd 100644
--- a/drivers/net/ice/base/ice_pg_cam.c
+++ b/drivers/net/ice/base/ice_pg_cam.c
@@ -50,7 +50,7 @@ static void _pg_cam_action_dump(struct ice_hw *hw,
/**
* ice_pg_cam_dump - dump an parse graph cam info
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
* @item: parse graph cam to dump
*/
void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item)
@@ -62,7 +62,7 @@ void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item)
/**
* ice_pg_nm_cam_dump - dump an parse graph no match cam info
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
* @item: parse graph no match cam to dump
*/
void ice_pg_nm_cam_dump(struct ice_hw *hw, struct ice_pg_nm_cam_item *item)
@@ -243,7 +243,7 @@ static void _pg_nm_sp_cam_parse_item(struct ice_hw *hw, u16 idx, void *item,
/**
* ice_pg_cam_table_get - create a parse graph cam table
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw)
{
@@ -257,7 +257,7 @@ struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw)
/**
* ice_pg_sp_cam_table_get - create a parse graph spill cam table
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw)
{
@@ -271,7 +271,7 @@ struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw)
/**
* ice_pg_nm_cam_table_get - create a parse graph no match cam table
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw)
{
@@ -285,7 +285,7 @@ struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw)
/**
* ice_pg_nm_sp_cam_table_get - create a parse graph no match spill cam table
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_pg_nm_cam_item *ice_pg_nm_sp_cam_table_get(struct ice_hw *hw)
{
diff --git a/drivers/net/ice/base/ice_proto_grp.c b/drivers/net/ice/base/ice_proto_grp.c
index 7ce87de110..5dbe07d258 100644
--- a/drivers/net/ice/base/ice_proto_grp.c
+++ b/drivers/net/ice/base/ice_proto_grp.c
@@ -17,7 +17,7 @@ static void _proto_off_dump(struct ice_hw *hw, struct ice_proto_off *po,
/**
* ice_proto_grp_dump - dump a proto group item info
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
* @item: proto group item to dump
*/
void ice_proto_grp_dump(struct ice_hw *hw, struct ice_proto_grp_item *item)
@@ -94,7 +94,7 @@ static void _proto_grp_parse_item(struct ice_hw *hw, u16 idx, void *item,
/**
* ice_proto_grp_table_get - create a proto group table
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_proto_grp_item *ice_proto_grp_table_get(struct ice_hw *hw)
{
diff --git a/drivers/net/ice/base/ice_ptype_mk.c b/drivers/net/ice/base/ice_ptype_mk.c
index 97c41cb586..9807e688b1 100644
--- a/drivers/net/ice/base/ice_ptype_mk.c
+++ b/drivers/net/ice/base/ice_ptype_mk.c
@@ -9,7 +9,7 @@
/**
* ice_ptype_mk_tcam_dump - dump an ptype marker tcam info_
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
* @item: ptype marker tcam to dump
*/
void ice_ptype_mk_tcam_dump(struct ice_hw *hw,
@@ -41,7 +41,7 @@ static void _parse_ptype_mk_tcam_item(struct ice_hw *hw, u16 idx, void *item,
/**
* ice_ptype_mk_tcam_table_get - create a ptype marker tcam table
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_ptype_mk_tcam_item *ice_ptype_mk_tcam_table_get(struct ice_hw *hw)
{
diff --git a/drivers/net/ice/base/ice_xlt_kb.c b/drivers/net/ice/base/ice_xlt_kb.c
index 4c1ab747cf..5efe209cad 100644
--- a/drivers/net/ice/base/ice_xlt_kb.c
+++ b/drivers/net/ice/base/ice_xlt_kb.c
@@ -25,7 +25,7 @@ static void _xlt_kb_entry_dump(struct ice_hw *hw,
/**
* ice_imem_dump - dump a xlt key build info
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
* @kb: key build to dump
*/
void ice_xlt_kb_dump(struct ice_hw *hw, struct ice_xlt_kb *kb)
@@ -154,7 +154,7 @@ static struct ice_xlt_kb *_xlt_kb_get(struct ice_hw *hw, u32 sect_type)
/**
* ice_xlt_kb_get_sw - create switch xlt key build
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_xlt_kb *ice_xlt_kb_get_sw(struct ice_hw *hw)
{
@@ -163,7 +163,7 @@ struct ice_xlt_kb *ice_xlt_kb_get_sw(struct ice_hw *hw)
/**
* ice_xlt_kb_get_acl - create acl xlt key build
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_xlt_kb *ice_xlt_kb_get_acl(struct ice_hw *hw)
{
@@ -172,7 +172,7 @@ struct ice_xlt_kb *ice_xlt_kb_get_acl(struct ice_hw *hw)
/**
* ice_xlt_kb_get_fd - create fdir xlt key build
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_xlt_kb *ice_xlt_kb_get_fd(struct ice_hw *hw)
{
@@ -181,7 +181,7 @@ struct ice_xlt_kb *ice_xlt_kb_get_fd(struct ice_hw *hw)
/**
* ice_xlt_kb_get_fd - create rss xlt key build
- * @ice_hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*/
struct ice_xlt_kb *ice_xlt_kb_get_rss(struct ice_hw *hw)
{
--
2.31.1
* [PATCH v2 26/70] net/ice/base: fix endian format
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (24 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 25/70] net/ice/base: fix incorrect function descriptions for parser Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 27/70] net/ice/base: convert IO expander handle to u16 Qi Zhang
` (44 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, stable, Jacob Keller
A few functions failed to properly convert some values into Little
Endian format before sending them to the firmware. This will produce
incorrect results when running on a Big Endian platform.
Fix this by adding the necessary CPU_TO_LE* macros around the values
passed to the firmware.
These issues were detected by sparse.
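For readers unfamiliar with the macros, a minimal stand-alone sketch of
what a CPU_TO_LE16-style conversion has to do is shown below; this is a
conceptual illustration, not the driver's actual implementation:

    #include <stdint.h>

    /* Conceptual stand-in for the driver's CPU_TO_LE16 macro: a byte swap
     * on a big endian host and a no-op on a little endian host.  Skipping
     * the conversion is invisible on x86 but silently corrupts the value
     * sent to firmware on big endian platforms, which is what sparse flags.
     */
    static inline uint16_t cpu_to_le16_sketch(uint16_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    	return (uint16_t)((v >> 8) | (v << 8));
    #else
    	return v;
    #endif
    }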
Fixes: 0f61c2af88c8 ("net/ice/base: add set/get GPIO helper functions")
Cc: stable@dpdk.org
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_common.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index 57602a31e1..cb06fdf42b 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -4028,7 +4028,7 @@ ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);
- desc.datalen = data_size;
+ desc.datalen = CPU_TO_LE16(data_size);
ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
ICE_NONDMA_TO_NONDMA);
cmd->start_address = CPU_TO_LE32(start_address);
@@ -5682,7 +5682,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
cmd = &desc.params.read_write_gpio;
- cmd->gpio_ctrl_handle = gpio_ctrl_handle;
+ cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
cmd->gpio_num = pin_idx;
cmd->gpio_val = value ? 1 : 0;
@@ -5710,7 +5710,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
cmd = &desc.params.read_write_gpio;
- cmd->gpio_ctrl_handle = gpio_ctrl_handle;
+ cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
cmd->gpio_num = pin_idx;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
--
2.31.1
* [PATCH v2 27/70] net/ice/base: convert IO expander handle to u16
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (25 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 26/70] net/ice/base: fix endian format Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 28/70] net/ice/base: convert array of u8 to bitmap Qi Zhang
` (43 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Jacob Keller
The io_expander_handle cached value is marked as an __le16, but
several places track the node handle with u16 values. Unify all
the interfaces so that it is stored and reported as a u16, and
keep the low level conversion to LE16 only at the direct firmware
interface.
This fixes warnings from sparse about mixing __le16 and u16, and
will fix related issues on platforms which use Big Endian format.
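Condensed from the diff below, the convention is to keep the handle in
host byte order everywhere internally and convert it only when writing
it into the firmware descriptor (identifiers as in the patch, shown here
out of context):

    u16 handle;	/* native host order in all internal interfaces */

    status = ice_get_pca9575_handle(hw, &handle);
    if (status)
    	return status;

    /* convert to __le16 only at the admin queue boundary */
    link_topo.handle = CPU_TO_LE16(handle);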
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_ptp_hw.c | 10 +++++++---
drivers/net/ice/base/ice_type.h | 2 +-
2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index 1fb0c57a8c..3df0915cd3 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -4483,7 +4483,7 @@ ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd,
* will return cached value
*/
static enum ice_status
-ice_get_pca9575_handle(struct ice_hw *hw, __le16 *pca9575_handle)
+ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
{
struct ice_aqc_get_link_topo cmd;
u8 node_part_number, idx;
@@ -4564,13 +4564,15 @@ ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
struct ice_aqc_link_topo_addr link_topo;
enum ice_status status;
__le16 addr;
+ u16 handle;
memset(&link_topo, 0, sizeof(link_topo));
- status = ice_get_pca9575_handle(hw, &link_topo.handle);
+ status = ice_get_pca9575_handle(hw, &handle);
if (status)
return status;
+ link_topo.handle = CPU_TO_LE16(handle);
link_topo.topo_params.node_type_ctx =
(ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED <<
ICE_AQC_LINK_TOPO_NODE_CTX_S);
@@ -4594,13 +4596,15 @@ ice_write_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 data)
struct ice_aqc_link_topo_addr link_topo;
enum ice_status status;
__le16 addr;
+ u16 handle;
memset(&link_topo, 0, sizeof(link_topo));
- status = ice_get_pca9575_handle(hw, &link_topo.handle);
+ status = ice_get_pca9575_handle(hw, &handle);
if (status)
return status;
+ link_topo.handle = CPU_TO_LE16(handle);
link_topo.topo_params.node_type_ctx =
(ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED <<
ICE_AQC_LINK_TOPO_NODE_CTX_S);
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index d94fdcda67..b8be0d948a 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -1259,7 +1259,7 @@ struct ice_hw {
struct LIST_HEAD_TYPE rss_list_head;
ice_declare_bitmap(hw_ptype, ICE_FLOW_PTYPE_MAX);
u8 dvm_ena;
- __le16 io_expander_handle;
+ u16 io_expander_handle;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
--
2.31.1
* [PATCH v2 28/70] net/ice/base: convert array of u8 to bitmap
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (26 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 27/70] net/ice/base: convert IO expander handle to u16 Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 29/70] net/ice/base: fix array overflow in add switch recipe code Qi Zhang
` (42 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Jacob Keller, Jesse Brandeburg
Previously the ice_add_prof function took an array of u8 and looped
over it with for_each_set_bit, examining each 8-bit value as a bitmap.
This was hard to understand and unnecessary, and it triggered undefined
behavior sanitizer warnings for unaligned accesses within bitmap
fields. Since the ptypes argument being passed in was already declared
as a bitmap, refactor the function to use native bitmap types, which
also simplifies the code to a single loop.
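In short, the loop structure changes as sketched below (simplified from
the diff that follows):

    /* before: walk bytes, then the bits inside each byte */
    while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
    	ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte],
    			     BITS_PER_BYTE) {
    		u16 ptype = byte * BITS_PER_BYTE + bit;
    		/* ... */
    	}
    	bytes--;
    	byte++;
    }

    /* after: a single walk over the bitmap as declared */
    ice_for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) {
    	/* ... */
    }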
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_flex_pipe.c | 76 ++++++++++------------------
drivers/net/ice/base/ice_flex_pipe.h | 6 +--
drivers/net/ice/base/ice_flow.c | 4 +-
3 files changed, 32 insertions(+), 54 deletions(-)
diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index a43d7ef76b..0840b976aa 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -3170,7 +3170,7 @@ void ice_disable_fd_swap(struct ice_hw *hw, u16 prof_id)
* @hw: pointer to the HW struct
* @blk: hardware block
* @id: profile tracking ID
- * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
+ * @ptypes: bitmap indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
* @attr: array of attributes
* @attr_cnt: number of elements in attrib array
* @es: extraction sequence (length of array is determined by the block)
@@ -3183,16 +3183,15 @@ void ice_disable_fd_swap(struct ice_hw *hw, u16 prof_id)
* the ID value used here.
*/
enum ice_status
-ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool fd_swap)
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
+ ice_bitmap_t *ptypes, const struct ice_ptype_attributes *attr,
+ u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool fd_swap)
{
- u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
enum ice_status status;
- u8 byte = 0;
u8 prof_id;
+ u16 ptype;
ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
@@ -3241,56 +3240,35 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
prof->context = 0;
/* build list of ptgs */
- while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
- u8 bit;
+ ice_for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) {
+ u8 ptg;
- if (!ptypes[byte]) {
- bytes--;
- byte++;
+ /* The package should place all ptypes in a non-zero
+ * PTG, so the following call should never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
continue;
- }
-
- /* Examine 8 bits per byte */
- ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte],
- BITS_PER_BYTE) {
- u16 ptype;
- u8 ptg;
-
- ptype = byte * BITS_PER_BYTE + bit;
- /* The package should place all ptypes in a non-zero
- * PTG, so the following call should never fail.
- */
- if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
- continue;
+ /* If PTG is already added, skip and continue */
+ if (ice_is_bit_set(ptgs_used, ptg))
+ continue;
- /* If PTG is already added, skip and continue */
- if (ice_is_bit_set(ptgs_used, ptg))
- continue;
+ ice_set_bit(ptg, ptgs_used);
+ /* Check to see there are any attributes for this ptype, and
+ * add them if found.
+ */
+ status = ice_add_prof_attrib(prof, ptg, ptype, attr, attr_cnt);
+ if (status == ICE_ERR_MAX_LIMIT)
+ break;
+ if (status) {
+ /* This is simple a ptype/PTG with no attribute */
+ prof->ptg[prof->ptg_cnt] = ptg;
+ prof->attr[prof->ptg_cnt].flags = 0;
+ prof->attr[prof->ptg_cnt].mask = 0;
- ice_set_bit(ptg, ptgs_used);
- /* Check to see there are any attributes for this
- * ptype, and add them if found.
- */
- status = ice_add_prof_attrib(prof, ptg, ptype, attr,
- attr_cnt);
- if (status == ICE_ERR_MAX_LIMIT)
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
break;
- if (status) {
- /* This is simple a ptype/PTG with no
- * attribute
- */
- prof->ptg[prof->ptg_cnt] = ptg;
- prof->attr[prof->ptg_cnt].flags = 0;
- prof->attr[prof->ptg_cnt].mask = 0;
-
- if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
- break;
- }
}
-
- bytes--;
- byte++;
}
LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
diff --git a/drivers/net/ice/base/ice_flex_pipe.h b/drivers/net/ice/base/ice_flex_pipe.h
index aab765e68f..777790a9c0 100644
--- a/drivers/net/ice/base/ice_flex_pipe.h
+++ b/drivers/net/ice/base/ice_flex_pipe.h
@@ -40,9 +40,9 @@ enum ice_status
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig);
void ice_disable_fd_swap(struct ice_hw *hw, u16 prof_id);
enum ice_status
-ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool fd_swap);
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
+ ice_bitmap_t *ptypes, const struct ice_ptype_attributes *attr,
+ u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool fd_swap);
void ice_init_all_prof_masks(struct ice_hw *hw);
void ice_shutdown_all_prof_masks(struct ice_hw *hw);
struct ice_prof_map *
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
index bdb584c7f5..54181044f1 100644
--- a/drivers/net/ice/base/ice_flow.c
+++ b/drivers/net/ice/base/ice_flow.c
@@ -2257,7 +2257,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
}
/* Add a HW profile for this flow profile */
- status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
+ status = ice_add_prof(hw, blk, prof_id, params->ptypes,
params->attr, params->attr_cnt, params->es,
params->mask, true);
if (status) {
@@ -2604,7 +2604,7 @@ ice_flow_set_hw_prof(struct ice_hw *hw, u16 dest_vsi_handle,
break;
}
- status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes,
+ status = ice_add_prof(hw, blk, id, prof->ptypes,
params->attr, params->attr_cnt,
params->es, params->mask, false);
if (status)
--
2.31.1
* [PATCH v2 29/70] net/ice/base: fix array overflow in add switch recipe code
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (27 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 28/70] net/ice/base: convert array of u8 to bitmap Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 30/70] net/ice/base: fix bit finding range over ptype bitmap Qi Zhang
` (41 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, stable, Jesse Brandeburg
The array indexes in this function are zero-based for the fv_idx table
but offset by +1 for the lkup_idx arrays. The code used the lookup
index for the field vector in only one place in this function, and the
value written there was never used afterwards, so simply remove the bad
line.
This was caught by the undefined behavior sanitizer.
Fixes: fed0c5ca5f19 ("net/ice/base: support programming a new switch recipe")
Cc: stable@dpdk.org
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_switch.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index e59d191c46..b8e733f539 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -7315,7 +7315,6 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
l_entry) {
- last_chain_entry->fv_idx[i] = entry->chain_idx;
buf[recps].content.lkup_indx[i] = entry->chain_idx;
buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
ice_set_bit(entry->rid, rm->r_bitmap);
--
2.31.1
* [PATCH v2 30/70] net/ice/base: fix bit finding range over ptype bitmap
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (28 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 29/70] net/ice/base: fix array overflow in add switch recipe code Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 31/70] net/ice/base: move function to internal Qi Zhang
` (40 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, stable, Junfeng Guo
The second argument to ice_find_first_bit is the bitmap size in bits,
not a mask. Thus, passing UINT16_MAX (0xFFFF) can run the search off
the end of the ptype array.
Also, the ptype bitmap (i.e., prof->ptypes) is declared with size
ICE_FLOW_PTYPE_MAX, thus finding the bits within this bitmap should
not exceed this bound.
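A short before/after sketch of the call in question (the bitmap is
declared with ICE_FLOW_PTYPE_MAX bits):

    int id;

    /* wrong: 0xFFFF is treated as a length in bits, reads past the array */
    id = ice_find_first_bit(prof->ptypes, UINT16_MAX);

    /* right: bound the search by the declared bitmap size */
    id = ice_find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);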
Fixes: 8ebb93942b2c ("net/ice/base: add function to set HW profile for raw flow")
Cc: stable@dpdk.org
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_flow.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
index 54181044f1..b196e51276 100644
--- a/drivers/net/ice/base/ice_flow.c
+++ b/drivers/net/ice/base/ice_flow.c
@@ -2561,7 +2561,7 @@ ice_flow_set_hw_prof(struct ice_hw *hw, u16 dest_vsi_handle,
u16 fdir_vsi_handle, struct ice_parser_profile *prof,
enum ice_block blk)
{
- int id = ice_find_first_bit(prof->ptypes, UINT16_MAX);
+ int id = ice_find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
struct ice_flow_prof_params *params;
u8 fv_words = hw->blk[blk].es.fvw;
enum ice_status status;
--
2.31.1
* [PATCH v2 31/70] net/ice/base: move function to internal
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (29 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 30/70] net/ice/base: fix bit finding range over ptype bitmap Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-22 5:34 ` Yang, Qiming
2022-08-15 7:31 ` [PATCH v2 32/70] net/ice/base: change PHY/QUAD/ports definitions Qi Zhang
` (39 subsequent siblings)
70 siblings, 1 reply; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Junfeng Guo
The function ice_disable_fd_swap should be defined as static.
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_flex_pipe.c | 2 +-
drivers/net/ice/base/ice_flex_pipe.h | 1 -
2 files changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index 0840b976aa..aea0d97b9d 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -3133,7 +3133,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* @hw: pointer to the HW struct
* @prof_id: profile ID
*/
-void ice_disable_fd_swap(struct ice_hw *hw, u16 prof_id)
+static void ice_disable_fd_swap(struct ice_hw *hw, u16 prof_id)
{
u8 swap_val = ICE_SWAP_VALID;
u8 i;
diff --git a/drivers/net/ice/base/ice_flex_pipe.h b/drivers/net/ice/base/ice_flex_pipe.h
index 777790a9c0..8fde36dfa6 100644
--- a/drivers/net/ice/base/ice_flex_pipe.h
+++ b/drivers/net/ice/base/ice_flex_pipe.h
@@ -38,7 +38,6 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
/* XLT2/VSI group functions */
enum ice_status
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig);
-void ice_disable_fd_swap(struct ice_hw *hw, u16 prof_id);
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
ice_bitmap_t *ptypes, const struct ice_ptype_attributes *attr,
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* RE: [PATCH v2 31/70] net/ice/base: move function to internal
2022-08-15 7:31 ` [PATCH v2 31/70] net/ice/base: move function to internal Qi Zhang
@ 2022-08-22 5:34 ` Yang, Qiming
0 siblings, 0 replies; 149+ messages in thread
From: Yang, Qiming @ 2022-08-22 5:34 UTC (permalink / raw)
To: Zhang, Qi Z; +Cc: dev, Guo, Junfeng
> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: Monday, August 15, 2022 3:31 PM
> To: Yang, Qiming <qiming.yang@intel.com>
> Cc: dev@dpdk.org; Zhang, Qi Z <qi.z.zhang@intel.com>; Guo, Junfeng
> <junfeng.guo@intel.com>
> Subject: [PATCH v2 31/70] net/ice/base: move function to internal
>
> The function ice_disable_fd_swap should be defined as static.
>
> Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> ---
> drivers/net/ice/base/ice_flex_pipe.c | 2 +-
> drivers/net/ice/base/ice_flex_pipe.h | 1 -
> 2 files changed, 1 insertion(+), 2 deletions(-)
>
> diff --git a/drivers/net/ice/base/ice_flex_pipe.c
> b/drivers/net/ice/base/ice_flex_pipe.c
> index 0840b976aa..aea0d97b9d 100644
> --- a/drivers/net/ice/base/ice_flex_pipe.c
> +++ b/drivers/net/ice/base/ice_flex_pipe.c
> @@ -3133,7 +3133,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8
> ptg, u16 ptype,
> * @hw: pointer to the HW struct
> * @prof_id: profile ID
> */
> -void ice_disable_fd_swap(struct ice_hw *hw, u16 prof_id)
> +static void ice_disable_fd_swap(struct ice_hw *hw, u16 prof_id)
> {
> u8 swap_val = ICE_SWAP_VALID;
> u8 i;
> diff --git a/drivers/net/ice/base/ice_flex_pipe.h
> b/drivers/net/ice/base/ice_flex_pipe.h
> index 777790a9c0..8fde36dfa6 100644
> --- a/drivers/net/ice/base/ice_flex_pipe.h
> +++ b/drivers/net/ice/base/ice_flex_pipe.h
> @@ -38,7 +38,6 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
> /* XLT2/VSI group functions */
> enum ice_status
> ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig);
> -void ice_disable_fd_swap(struct ice_hw *hw, u16 prof_id); enum ice_status
> ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
> ice_bitmap_t *ptypes, const struct ice_ptype_attributes *attr,
> --
> 2.31.1
31/70 ~ 50/70
Acked-by: Qiming Yang <qiming.yang@intel.com>
* [PATCH v2 32/70] net/ice/base: change PHY/QUAD/ports definitions
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (30 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 31/70] net/ice/base: move function to internal Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 33/70] net/ice/base: add AQ command to config node attribute Qi Zhang
` (38 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Karol Kolacinski
Rename PHY/QUAD/ports definitions to reflect the correct HW
specification.
Signed-off-by: Karol Kolacinski <karol.kolacinski@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_ptp_hw.c | 45 ++++++++++++++++---------------
drivers/net/ice/base/ice_type.h | 14 +++++-----
2 files changed, 31 insertions(+), 28 deletions(-)
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index 3df0915cd3..7ed420be8e 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -1794,9 +1794,9 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
{
int phy_port, phy, quadtype;
- phy_port = port % ICE_PORTS_PER_PHY;
- phy = port / ICE_PORTS_PER_PHY;
- quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE;
+ phy_port = port % ICE_PORTS_PER_PHY_E822;
+ phy = port / ICE_PORTS_PER_PHY_E822;
+ quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E822;
if (quadtype == 0) {
msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
@@ -2184,20 +2184,25 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
* Fill a message buffer for accessing a register in a quad shared between
* multiple PHYs.
*/
-static void
+static enum ice_status
ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
{
u32 addr;
+ if (quad >= ICE_MAX_QUAD)
+ return ICE_ERR_PARAM;
+
msg->dest_dev = rmn_0;
- if ((quad % ICE_NUM_QUAD_TYPE) == 0)
+ if ((quad % ICE_QUADS_PER_PHY_E822) == 0)
addr = Q_0_BASE + offset;
else
addr = Q_1_BASE + offset;
msg->msg_addr_low = ICE_LO_WORD(addr);
msg->msg_addr_high = ICE_HI_WORD(addr);
+
+ return ICE_SUCCESS;
}
/**
@@ -2218,22 +2223,21 @@ ice_read_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32 *val,
struct ice_sbq_msg_input msg = {0};
enum ice_status status;
- if (quad >= ICE_MAX_QUAD)
- return ICE_ERR_PARAM;
+ status = ice_fill_quad_msg_e822(&msg, quad, offset);
+ if (status)
+ goto exit_err;
- ice_fill_quad_msg_e822(&msg, quad, offset);
msg.opcode = ice_sbq_msg_rd;
status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
- if (status) {
+exit_err:
+ if (status)
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to phy, status %d\n",
status);
- return status;
- }
-
- *val = msg.data;
+ else
+ *val = msg.data;
- return ICE_SUCCESS;
+ return status;
}
enum ice_status
@@ -2260,21 +2264,20 @@ ice_write_quad_reg_e822_lp(struct ice_hw *hw, u8 quad, u16 offset, u32 val,
struct ice_sbq_msg_input msg = {0};
enum ice_status status;
- if (quad >= ICE_MAX_QUAD)
- return ICE_ERR_PARAM;
+ status = ice_fill_quad_msg_e822(&msg, quad, offset);
+ if (status)
+ goto exit_err;
- ice_fill_quad_msg_e822(&msg, quad, offset);
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
status = ice_sbq_rw_reg_lp(hw, &msg, lock_sbq);
- if (status) {
+exit_err:
+ if (status)
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to phy, status %d\n",
status);
- return status;
- }
- return ICE_SUCCESS;
+ return status;
}
enum ice_status
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index b8be0d948a..5c7cc06e0c 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -1191,13 +1191,13 @@ struct ice_hw {
/* true if VSIs can share unicast MAC addr */
u8 umac_shared;
-#define ICE_PHY_PER_NAC 1
-#define ICE_MAX_QUAD 2
-#define ICE_NUM_QUAD_TYPE 2
-#define ICE_PORTS_PER_QUAD 4
-#define ICE_PHY_0_LAST_QUAD 1
-#define ICE_PORTS_PER_PHY 8
-#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY
+#define ICE_PHY_PER_NAC_E822 1
+#define ICE_MAX_QUAD 2
+#define ICE_QUADS_PER_PHY_E822 2
+#define ICE_PORTS_PER_PHY_E822 8
+#define ICE_PORTS_PER_QUAD 4
+#define ICE_PORTS_PER_PHY_E810 4
+#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
/* bitmap of enabled logical ports */
u32 ena_lports;
--
2.31.1
* [PATCH v2 33/70] net/ice/base: add AQ command to config node attribute
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (31 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 32/70] net/ice/base: change PHY/QUAD/ports definitions Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 34/70] net/ice/base: fix null pointer dereference during Qi Zhang
` (37 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Ben Shelton
Add an AQ command to configure node attributes.
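A hypothetical caller sketch for the new command is shown below; the
node TEID and the attribute values are placeholders for illustration
and are not taken from real hardware:

    u32 node_teid = 0x10;	/* placeholder TEID of a scheduler node */
    struct ice_aqc_node_attr_elem elem = { 0 };
    enum ice_status status;

    elem.node_teid = CPU_TO_LE32(node_teid);
    elem.max_children = CPU_TO_LE16(8);
    elem.children_level = CPU_TO_LE16(1);

    status = ice_aq_cfg_node_attr(hw, 1, &elem, sizeof(elem), NULL);
    if (status)
    	return status;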
Signed-off-by: Ben Shelton <benjamin.h.shelton@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_adminq_cmd.h | 17 +++++++++++++++++
drivers/net/ice/base/ice_sched.c | 27 +++++++++++++++++++++++++++
drivers/net/ice/base/ice_sched.h | 4 ++++
3 files changed, 48 insertions(+)
diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index 8f7e13096c..9f84ffca67 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -1215,6 +1215,22 @@ struct ice_aqc_rl_profile_elem {
__le16 rl_encode;
};
+/* Config Node Attributes (indirect 0x0419)
+ * Query Node Attributes (indirect 0x041A)
+ */
+struct ice_aqc_node_attr {
+ __le16 num_entries; /* Number of attributes structures in the buffer */
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_node_attr_elem {
+ __le32 node_teid;
+ __le16 max_children;
+ __le16 children_level;
+};
+
/* Configure L2 Node CGD (indirect 0x0414)
* This indirect command allows configuring a congestion domain for given L2
* node TEIDs in the scheduler topology.
@@ -2976,6 +2992,7 @@ struct ice_aq_desc {
struct ice_aqc_cfg_l2_node_cgd cfg_l2_node_cgd;
struct ice_aqc_query_port_ets port_ets;
struct ice_aqc_rl_profile rl_profile;
+ struct ice_aqc_node_attr node_attr;
struct ice_aqc_nvm nvm;
struct ice_aqc_nvm_cfg nvm_cfg;
struct ice_aqc_nvm_checksum nvm_checksum;
diff --git a/drivers/net/ice/base/ice_sched.c b/drivers/net/ice/base/ice_sched.c
index 71b5677f43..6f938d71a1 100644
--- a/drivers/net/ice/base/ice_sched.c
+++ b/drivers/net/ice/base/ice_sched.c
@@ -839,6 +839,33 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
hw->max_cgds = 0;
}
+/**
+ * ice_aq_cfg_node_attr - configure nodes' per-cone flattening attributes
+ * @hw: pointer to the HW struct
+ * @num_nodes: the number of nodes whose attributes to configure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @cd: pointer to command details structure or NULL
+ *
+ * Configure Node Attributes (0x0417)
+ */
+enum ice_status
+ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
+ struct ice_aqc_node_attr_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_node_attr *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.node_attr;
+ ice_fill_dflt_direct_cmd_desc(&desc,
+ ice_aqc_opc_cfg_node_attr);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ cmd->num_entries = CPU_TO_LE16(num_nodes);
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
/**
* ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping
* @hw: pointer to the HW struct
diff --git a/drivers/net/ice/base/ice_sched.h b/drivers/net/ice/base/ice_sched.h
index c9f3f79eff..6b12a0688a 100644
--- a/drivers/net/ice/base/ice_sched.h
+++ b/drivers/net/ice/base/ice_sched.h
@@ -78,6 +78,10 @@ ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
+ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
+ struct ice_aqc_node_attr_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+enum ice_status
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_nodes,
struct ice_aqc_cfg_l2_node_cgd_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
--
2.31.1
* [PATCH v2 34/70] net/ice/base: fix null pointer dereference during
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (32 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 33/70] net/ice/base: add AQ command to config node attribute Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 35/70] net/ice/base: refine default VSI config Qi Zhang
` (36 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, stable, Roman Storozhenko
Sometimes, during the shutdown process, an unrecoverable PCIe error
occurs. This leads to a NULL pointer dereference while clearing the
hardware tables.
The patch fixes this bug by checking every table pointer against
NULL before dereferencing it, as some of the tables may already have
been freed earlier.
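The fix applies a simple guard pattern throughout ice_clear_hw_tbls();
one table from the diff below, shown in isolation:

    /* only clear the table if its backing allocation still exists */
    if (xlt1->ptypes)
    	ice_memset(xlt1->ptypes, 0,
    		   xlt1->count * sizeof(*xlt1->ptypes),
    		   ICE_NONDMA_MEM);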
Fixes: 969890d505b1 ("net/ice/base: enable clearing of HW tables")
Cc: stable@dpdk.org
Signed-off-by: Roman Storozhenko <roman.storozhenko@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_flex_pipe.c | 332 +++++++++++++++------------
1 file changed, 179 insertions(+), 153 deletions(-)
diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index aea0d97b9d..2d95ce4d74 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -2144,6 +2144,129 @@ void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
}
+/**
+ * ice_init_hw_tbls - init hardware table memory
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
+{
+ u8 i;
+
+ ice_init_lock(&hw->rss_locks);
+ INIT_LIST_HEAD(&hw->rss_list_head);
+ if (!hw->dcf_enabled)
+ ice_init_all_prof_masks(hw);
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_tcam *prof = &hw->blk[i].prof;
+ struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
+ struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
+ struct ice_es *es = &hw->blk[i].es;
+ u16 j;
+
+ if (hw->blk[i].is_list_init)
+ continue;
+
+ ice_init_flow_profs(hw, i);
+ ice_init_lock(&es->prof_map_lock);
+ INIT_LIST_HEAD(&es->prof_map);
+ hw->blk[i].is_list_init = true;
+
+ hw->blk[i].overwrite = blk_sizes[i].overwrite;
+ es->reverse = blk_sizes[i].reverse;
+
+ xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
+ xlt1->count = blk_sizes[i].xlt1;
+
+ xlt1->ptypes = (struct ice_ptg_ptype *)
+ ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
+
+ if (!xlt1->ptypes)
+ goto err;
+
+ xlt1->ptg_tbl = (struct ice_ptg_entry *)
+ ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
+
+ if (!xlt1->ptg_tbl)
+ goto err;
+
+ xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
+ if (!xlt1->t)
+ goto err;
+
+ xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
+ xlt2->count = blk_sizes[i].xlt2;
+
+ xlt2->vsis = (struct ice_vsig_vsi *)
+ ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
+
+ if (!xlt2->vsis)
+ goto err;
+
+ xlt2->vsig_tbl = (struct ice_vsig_entry *)
+ ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
+ if (!xlt2->vsig_tbl)
+ goto err;
+
+ for (j = 0; j < xlt2->count; j++)
+ INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
+
+ xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
+ if (!xlt2->t)
+ goto err;
+
+ prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
+ prof->count = blk_sizes[i].prof_tcam;
+ prof->max_prof_id = blk_sizes[i].prof_id;
+ prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
+ prof->t = (struct ice_prof_tcam_entry *)
+ ice_calloc(hw, prof->count, sizeof(*prof->t));
+
+ if (!prof->t)
+ goto err;
+
+ prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
+ prof_redir->count = blk_sizes[i].prof_redir;
+ prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
+ sizeof(*prof_redir->t));
+
+ if (!prof_redir->t)
+ goto err;
+
+ es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
+ es->count = blk_sizes[i].es;
+ es->fvw = blk_sizes[i].fvw;
+ es->t = (struct ice_fv_word *)
+ ice_calloc(hw, (u32)(es->count * es->fvw),
+ sizeof(*es->t));
+ if (!es->t)
+ goto err;
+
+ es->ref_count = (u16 *)
+ ice_calloc(hw, es->count, sizeof(*es->ref_count));
+
+ if (!es->ref_count)
+ goto err;
+
+ es->written = (u8 *)
+ ice_calloc(hw, es->count, sizeof(*es->written));
+
+ if (!es->written)
+ goto err;
+
+ es->mask_ena = (u32 *)
+ ice_calloc(hw, es->count, sizeof(*es->mask_ena));
+
+ if (!es->mask_ena)
+ goto err;
+ }
+ return ICE_SUCCESS;
+
+err:
+ ice_free_hw_tbls(hw);
+ return ICE_ERR_NO_MEMORY;
+}
+
/**
* ice_fill_blk_tbls - Read package context for tables
* @hw: pointer to the hardware structure
@@ -2308,162 +2431,65 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
ice_free_vsig_tbl(hw, (enum ice_block)i);
- ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes),
- ICE_NONDMA_MEM);
- ice_memset(xlt1->ptg_tbl, 0,
- ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
- ICE_NONDMA_MEM);
- ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
- ICE_NONDMA_MEM);
-
- ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis),
- ICE_NONDMA_MEM);
- ice_memset(xlt2->vsig_tbl, 0,
- xlt2->count * sizeof(*xlt2->vsig_tbl),
- ICE_NONDMA_MEM);
- ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
- ICE_NONDMA_MEM);
-
- ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
- ICE_NONDMA_MEM);
- ice_memset(prof_redir->t, 0,
- prof_redir->count * sizeof(*prof_redir->t),
- ICE_NONDMA_MEM);
-
- ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw,
- ICE_NONDMA_MEM);
- ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count),
- ICE_NONDMA_MEM);
- ice_memset(es->written, 0, es->count * sizeof(*es->written),
- ICE_NONDMA_MEM);
- ice_memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena),
- ICE_NONDMA_MEM);
+ if (xlt1->ptypes)
+ ice_memset(xlt1->ptypes, 0,
+ xlt1->count * sizeof(*xlt1->ptypes),
+ ICE_NONDMA_MEM);
+
+ if (xlt1->ptg_tbl)
+ ice_memset(xlt1->ptg_tbl, 0,
+ ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
+ ICE_NONDMA_MEM);
+
+ if (xlt1->t)
+ ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
+ ICE_NONDMA_MEM);
+
+ if (xlt2->vsis)
+ ice_memset(xlt2->vsis, 0,
+ xlt2->count * sizeof(*xlt2->vsis),
+ ICE_NONDMA_MEM);
+
+ if (xlt2->vsig_tbl)
+ ice_memset(xlt2->vsig_tbl, 0,
+ xlt2->count * sizeof(*xlt2->vsig_tbl),
+ ICE_NONDMA_MEM);
+
+ if (xlt2->t)
+ ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
+ ICE_NONDMA_MEM);
+
+ if (prof->t)
+ ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
+ ICE_NONDMA_MEM);
+
+ if (prof_redir->t)
+ ice_memset(prof_redir->t, 0,
+ prof_redir->count * sizeof(*prof_redir->t),
+ ICE_NONDMA_MEM);
+
+ if (es->t)
+ ice_memset(es->t, 0,
+ es->count * sizeof(*es->t) * es->fvw,
+ ICE_NONDMA_MEM);
+
+ if (es->ref_count)
+ ice_memset(es->ref_count, 0,
+ es->count * sizeof(*es->ref_count),
+ ICE_NONDMA_MEM);
+
+ if (es->written)
+ ice_memset(es->written, 0,
+ es->count * sizeof(*es->written),
+ ICE_NONDMA_MEM);
+
+ if (es->mask_ena)
+ ice_memset(es->mask_ena, 0,
+ es->count * sizeof(*es->mask_ena),
+ ICE_NONDMA_MEM);
}
}
-/**
- * ice_init_hw_tbls - init hardware table memory
- * @hw: pointer to the hardware structure
- */
-enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
-{
- u8 i;
-
- ice_init_lock(&hw->rss_locks);
- INIT_LIST_HEAD(&hw->rss_list_head);
- if (!hw->dcf_enabled)
- ice_init_all_prof_masks(hw);
- for (i = 0; i < ICE_BLK_COUNT; i++) {
- struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
- struct ice_prof_tcam *prof = &hw->blk[i].prof;
- struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
- struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
- struct ice_es *es = &hw->blk[i].es;
- u16 j;
-
- if (hw->blk[i].is_list_init)
- continue;
-
- ice_init_flow_profs(hw, i);
- ice_init_lock(&es->prof_map_lock);
- INIT_LIST_HEAD(&es->prof_map);
- hw->blk[i].is_list_init = true;
-
- hw->blk[i].overwrite = blk_sizes[i].overwrite;
- es->reverse = blk_sizes[i].reverse;
-
- xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
- xlt1->count = blk_sizes[i].xlt1;
-
- xlt1->ptypes = (struct ice_ptg_ptype *)
- ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
-
- if (!xlt1->ptypes)
- goto err;
-
- xlt1->ptg_tbl = (struct ice_ptg_entry *)
- ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
-
- if (!xlt1->ptg_tbl)
- goto err;
-
- xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
- if (!xlt1->t)
- goto err;
-
- xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
- xlt2->count = blk_sizes[i].xlt2;
-
- xlt2->vsis = (struct ice_vsig_vsi *)
- ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
-
- if (!xlt2->vsis)
- goto err;
-
- xlt2->vsig_tbl = (struct ice_vsig_entry *)
- ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
- if (!xlt2->vsig_tbl)
- goto err;
-
- for (j = 0; j < xlt2->count; j++)
- INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
-
- xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
- if (!xlt2->t)
- goto err;
-
- prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
- prof->count = blk_sizes[i].prof_tcam;
- prof->max_prof_id = blk_sizes[i].prof_id;
- prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
- prof->t = (struct ice_prof_tcam_entry *)
- ice_calloc(hw, prof->count, sizeof(*prof->t));
-
- if (!prof->t)
- goto err;
-
- prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
- prof_redir->count = blk_sizes[i].prof_redir;
- prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
- sizeof(*prof_redir->t));
-
- if (!prof_redir->t)
- goto err;
-
- es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
- es->count = blk_sizes[i].es;
- es->fvw = blk_sizes[i].fvw;
- es->t = (struct ice_fv_word *)
- ice_calloc(hw, (u32)(es->count * es->fvw),
- sizeof(*es->t));
- if (!es->t)
- goto err;
-
- es->ref_count = (u16 *)
- ice_calloc(hw, es->count, sizeof(*es->ref_count));
-
- if (!es->ref_count)
- goto err;
-
- es->written = (u8 *)
- ice_calloc(hw, es->count, sizeof(*es->written));
-
- if (!es->written)
- goto err;
-
- es->mask_ena = (u32 *)
- ice_calloc(hw, es->count, sizeof(*es->mask_ena));
-
- if (!es->mask_ena)
- goto err;
- }
- return ICE_SUCCESS;
-
-err:
- ice_free_hw_tbls(hw);
- return ICE_ERR_NO_MEMORY;
-}
-
/**
* ice_prof_gen_key - generate profile ID key
* @hw: pointer to the HW struct
--
2.31.1
* [PATCH v2 35/70] net/ice/base: refine default VSI config
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (33 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 34/70] net/ice/base: fix null pointer dereference during Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 36/70] net/ice/base: fix add mac rule Qi Zhang
` (35 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Michal Wilczynski
Refine the ice_cfg_dflt_vsi API and add a new API,
ice_check_if_dflt_vsi, to check whether a VSI is the default VSI.
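As a hypothetical usage sketch (the pi, vsi_handle and status variables
are assumed to come from the caller), the new helper lets a caller check
for an existing default VSI rule before clearing it:

    bool rule_exists = false;

    /* clear the default Rx VSI only if this VSI currently holds the rule */
    if (ice_check_if_dflt_vsi(pi, vsi_handle, &rule_exists))
    	status = ice_cfg_dflt_vsi(pi, vsi_handle, false, ICE_FLTR_RX);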
Signed-off-by: Michal Wilczynski <michal.wilczynski@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_switch.c | 95 +++++++++++++++++--------------
drivers/net/ice/base/ice_switch.h | 2 +
2 files changed, 53 insertions(+), 44 deletions(-)
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index b8e733f539..124b4fad1b 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -2382,6 +2382,9 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw)
}
}
+static bool
+ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle);
+
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
* @hw: pointer to the HW struct
@@ -5496,24 +5499,19 @@ enum ice_status
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
u8 direction)
{
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_fltr_list_entry f_list_entry;
+ struct ice_sw_recipe *recp_list;
struct ice_fltr_info f_info;
struct ice_hw *hw = pi->hw;
- enum ice_adminq_opc opcode;
enum ice_status status;
- u16 s_rule_size;
+ u8 lport = pi->lport;
u16 hw_vsi_id;
+ recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
- hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
-
- s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
- s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
@@ -5521,54 +5519,63 @@ ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
f_info.flag = direction;
f_info.fltr_act = ICE_FWD_TO_VSI;
f_info.fwd_id.hw_vsi_id = hw_vsi_id;
+ f_info.vsi_handle = vsi_handle;
if (f_info.flag & ICE_FLTR_RX) {
f_info.src = pi->lport;
f_info.src_id = ICE_SRC_ID_LPORT;
- if (!set)
- f_info.fltr_rule_id =
- pi->dflt_rx_vsi_rule_id;
} else if (f_info.flag & ICE_FLTR_TX) {
f_info.src_id = ICE_SRC_ID_VSI;
f_info.src = hw_vsi_id;
- if (!set)
- f_info.fltr_rule_id =
- pi->dflt_tx_vsi_rule_id;
}
+ f_list_entry.fltr_info = f_info;
if (set)
- opcode = ice_aqc_opc_add_sw_rules;
+ status = ice_add_rule_internal(hw, recp_list, lport,
+ &f_list_entry);
else
- opcode = ice_aqc_opc_remove_sw_rules;
-
- ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
-
- status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
- if (status || !(f_info.flag & ICE_FLTR_TX_RX))
- goto out;
- if (set) {
- u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
-
- if (f_info.flag & ICE_FLTR_TX) {
- pi->dflt_tx_vsi_num = hw_vsi_id;
- pi->dflt_tx_vsi_rule_id = index;
- } else if (f_info.flag & ICE_FLTR_RX) {
- pi->dflt_rx_vsi_num = hw_vsi_id;
- pi->dflt_rx_vsi_rule_id = index;
- }
- } else {
- if (f_info.flag & ICE_FLTR_TX) {
- pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
- pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
- } else if (f_info.flag & ICE_FLTR_RX) {
- pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
- pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
+ status = ice_remove_rule_internal(hw, recp_list,
+ &f_list_entry);
+
+ return status;
+}
+
+/**
+ * ice_check_if_dflt_vsi - check if VSI is default VSI
+ * @pi: pointer to the port_info structure
+ * @vsi_handle: vsi handle to check for in filter list
+ * @rule_exists: indicates if there are any VSI's in the rule list
+ *
+ * checks if the VSI is in a default VSI list, and also indicates
+ * if the default VSI list is empty
+ */
+bool ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
+ bool *rule_exists)
+{
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ struct LIST_HEAD_TYPE *rule_head;
+ struct ice_sw_recipe *recp_list;
+ struct ice_lock *rule_lock;
+ bool ret = false;
+ recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
+ rule_lock = &recp_list->filt_rule_lock;
+ rule_head = &recp_list->filt_rules;
+
+ ice_acquire_lock(rule_lock);
+
+ if (rule_exists && !LIST_EMPTY(rule_head))
+ *rule_exists = true;
+
+ LIST_FOR_EACH_ENTRY(fm_entry, rule_head,
+ ice_fltr_mgmt_list_entry, list_entry) {
+ if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
+ ret = true;
+ break;
}
}
-out:
- ice_free(hw, s_rule);
- return status;
+ ice_release_lock(rule_lock);
+ return ret;
}
/**
diff --git a/drivers/net/ice/base/ice_switch.h b/drivers/net/ice/base/ice_switch.h
index c67cd09d21..ad1397ba5a 100644
--- a/drivers/net/ice/base/ice_switch.h
+++ b/drivers/net/ice/base/ice_switch.h
@@ -486,6 +486,8 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
u8 direction);
+bool ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
+ bool *rule_exists);
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 36/70] net/ice/base: fix add mac rule
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (34 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 35/70] net/ice/base: refine default VSI config Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 37/70] net/ice/base: support Tx topo config Qi Zhang
` (34 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, stable, Grzegorz Nitka
Fix the ice_add_mac_rule function by not overriding the action value
with the VSI id. It is possible to add MAC based switch filters
with an action other than FWD_TO_VSI.
In the current implementation the fwd_id member of the filter config
structure was always overwritten with the HW VSI index, regardless
of the action type.
Fix it by setting the HW VSI index only for FWD_TO_VSI action
filters and leaving it untouched for other actions.
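A minimal caller sketch of the case this fix is about (illustration
only, not part of the patch; field and helper names follow ice_switch.h
and the surrounding setup is assumed): a MAC rule whose action is not
ICE_FWD_TO_VSI, so fwd_id must be left alone.

    static enum ice_status
    add_drop_mac_rule(struct ice_hw *hw, u16 vsi_handle, const u8 *mac)
    {
            struct ice_fltr_list_entry entry;
            struct LIST_HEAD_TYPE mac_list;

            INIT_LIST_HEAD(&mac_list);
            ice_memset(&entry, 0, sizeof(entry), ICE_NONDMA_MEM);

            entry.fltr_info.lkup_type = ICE_SW_LKUP_MAC;
            entry.fltr_info.fltr_act = ICE_DROP_PACKET; /* not FWD_TO_VSI */
            entry.fltr_info.src_id = ICE_SRC_ID_VSI;
            entry.fltr_info.vsi_handle = vsi_handle;
            ice_memcpy(entry.fltr_info.l_data.mac.mac_addr, mac, ETH_ALEN,
                       ICE_NONDMA_TO_NONDMA);
            LIST_ADD(&entry.list_entry, &mac_list);

            /* before this fix, ice_add_mac_rule() overwrote
             * entry.fltr_info.fwd_id.hw_vsi_id even for this drop action
             */
            return ice_add_mac(hw, &mac_list);
    }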
Fixes: 3ee1b0159ee5 ("net/ice/base: support adding MAC rules on specific port")
Cc: stable@dpdk.org
Signed-off-by: Grzegorz Nitka <grzegorz.nitka@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_switch.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index 124b4fad1b..edcfa89bcb 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -4858,7 +4858,8 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
- m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
+ if (m_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI)
+ m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* update the src in case it is VSI num */
if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
return ICE_ERR_PARAM;
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 37/70] net/ice/base: support Tx topo config
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (35 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 36/70] net/ice/base: fix add mac rule Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 38/70] net/ice/base: adjust the VSI/Aggregator layers Qi Zhang
` (33 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Victor Raj
Complete the Tx topo config implementation.
Signed-off-by: Victor Raj <victor.raj@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_adminq_cmd.h | 3 +++
drivers/net/ice/base/ice_common.c | 3 +++
2 files changed, 6 insertions(+)
diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index 9f84ffca67..8efbb137da 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -127,6 +127,9 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1 0x0082
#define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2 0x0083
#define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3 0x0084
+#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
+#define ICE_AQC_CAPS_NAC_TOPOLOGY 0x0087
+#define ICE_AQC_CAPS_ROCEV2_LAG 0x0092
u8 major_ver;
u8 minor_ver;
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index cb06fdf42b..db78bf4152 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -2453,6 +2453,9 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->ext_topo_dev_img_prog_en[index]);
break;
}
+ case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
+ caps->tx_sched_topo_comp_mode_en = (number == 1);
+ break;
default:
/* Not one of the recognized common capabilities */
found = false;
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 38/70] net/ice/base: adjust the VSI/Aggregator layers
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (36 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 37/70] net/ice/base: support Tx topo config Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 39/70] net/ice/base: add data typecasting to match sizes Qi Zhang
` (32 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Victor Raj
Adjust the VSI/aggregator layers based on the number of logical
layers supported by the FW. Currently the VSI and aggregator layers
are fixed, based on the 9-layer scheduler tree layout. For performance
reasons the number of layers of the scheduler tree is changing from
9 to 5, which requires a readjustment of these VSI/aggregator layer
values.
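Worked out from the offsets in the hunks below (not new code, just a
quick check of the readjustment):

    9-layer tree: VSI layer        = 9 - ICE_VSI_LAYER_OFFSET (4) = 5
                  aggregator layer = 9 - ICE_AGG_LAYER_OFFSET (6) = 3
    5-layer tree: VSI layer        = 5 - ICE_QGRP_LAYER_OFFSET (2) = 3
                  (VSI and queue group layers are the same, so
                   ice_sched_get_free_qparent() returns the VSI node)
                  aggregator layer = sw_entry_point_layer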
Signed-off-by: Victor Raj <victor.raj@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_sched.c | 34 ++++++++++++++++----------------
drivers/net/ice/base/ice_sched.h | 3 +++
2 files changed, 20 insertions(+), 17 deletions(-)
diff --git a/drivers/net/ice/base/ice_sched.c b/drivers/net/ice/base/ice_sched.c
index 6f938d71a1..4d31e96fd0 100644
--- a/drivers/net/ice/base/ice_sched.c
+++ b/drivers/net/ice/base/ice_sched.c
@@ -1130,12 +1130,11 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* 5 or less sw_entry_point_layer
*/
/* calculate the VSI layer based on number of layers. */
- if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
- u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
-
- if (layer > hw->sw_entry_point_layer)
- return layer;
- }
+ if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
+ return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
+ else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS)
+ /* qgroup and VSI layers are same */
+ return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
return hw->sw_entry_point_layer;
}
@@ -1152,12 +1151,8 @@ static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
* 7 or less sw_entry_point_layer
*/
/* calculate the aggregator layer based on number of layers. */
- if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
- u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
-
- if (layer > hw->sw_entry_point_layer)
- return layer;
- }
+ if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
+ return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
return hw->sw_entry_point_layer;
}
@@ -1542,10 +1537,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
{
struct ice_sched_node *vsi_node, *qgrp_node;
struct ice_vsi_ctx *vsi_ctx;
+ u8 qgrp_layer, vsi_layer;
u16 max_children;
- u8 qgrp_layer;
qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
+ vsi_layer = ice_sched_get_vsi_layer(pi->hw);
max_children = pi->hw->max_children[qgrp_layer];
vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
@@ -1556,6 +1552,12 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
if (!vsi_node)
return NULL;
+ /* If the queue group and vsi layer are same then queues
+ * are all attached directly to VSI
+ */
+ if (qgrp_layer == vsi_layer)
+ return vsi_node;
+
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
while (qgrp_node) {
@@ -4060,7 +4062,7 @@ ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
enum ice_status status;
u8 profile_type;
- if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ if (!hw || layer_num >= hw->num_tx_sched_layers)
return NULL;
switch (rl_type) {
case ICE_MIN_BW:
@@ -4076,8 +4078,6 @@ ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
return NULL;
}
- if (!hw)
- return NULL;
LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
ice_aqc_rl_profile_info, list_entry)
if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
@@ -4279,7 +4279,7 @@ ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
struct ice_aqc_rl_profile_info *rl_prof_elem;
enum ice_status status = ICE_SUCCESS;
- if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ if (!hw || layer_num >= hw->num_tx_sched_layers)
return ICE_ERR_PARAM;
/* Check the existing list for RL profile */
LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
diff --git a/drivers/net/ice/base/ice_sched.h b/drivers/net/ice/base/ice_sched.h
index 6b12a0688a..53a68dbe51 100644
--- a/drivers/net/ice/base/ice_sched.h
+++ b/drivers/net/ice/base/ice_sched.h
@@ -7,6 +7,9 @@
#include "ice_common.h"
+#define ICE_SCHED_5_LAYERS 5
+#define ICE_SCHED_9_LAYERS 9
+
#define ICE_QGRP_LAYER_OFFSET 2
#define ICE_VSI_LAYER_OFFSET 4
#define ICE_AGG_LAYER_OFFSET 6
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 39/70] net/ice/base: add data typecasting to match sizes
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (37 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 38/70] net/ice/base: adjust the VSI/Aggregator layers Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 40/70] net/ice/base: add helper function to check if device is E823 Qi Zhang
` (31 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Vignesh Sridhar
Add typecasts to variables to avoid the compiler warnings generated
when a variable of one data type is assigned to a variable of a
smaller data type. For example, assigning an unsigned 16-bit integer
to an 8-bit integer could trigger data loss warnings or errors.
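A generic illustration of the pattern (not taken from the driver):

    u16 wide = 0x1234;
    u8 narrow;

    narrow = wide;     /* may warn: u16 to u8 conversion can lose data */
    narrow = (u8)wide; /* explicit cast documents the intended truncation */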
Signed-off-by: Vignesh Sridhar <vignesh.sridhar@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_acl_ctrl.c | 34 +++++++++++++--------------
drivers/net/ice/base/ice_adminq_cmd.h | 4 ++--
drivers/net/ice/base/ice_common.c | 13 +++++-----
drivers/net/ice/base/ice_dcb.c | 8 +++----
drivers/net/ice/base/ice_flex_pipe.c | 2 +-
drivers/net/ice/base/ice_flow.c | 26 ++++++++++----------
drivers/net/ice/base/ice_nvm.c | 2 +-
drivers/net/ice/base/ice_sched.c | 5 ++--
drivers/net/ice/base/ice_switch.c | 12 +++++-----
9 files changed, 52 insertions(+), 54 deletions(-)
diff --git a/drivers/net/ice/base/ice_acl_ctrl.c b/drivers/net/ice/base/ice_acl_ctrl.c
index 27aa6b62d4..2dd08e326e 100644
--- a/drivers/net/ice/base/ice_acl_ctrl.c
+++ b/drivers/net/ice/base/ice_acl_ctrl.c
@@ -6,10 +6,10 @@
#include "ice_flow.h"
/* Determine the TCAM index of entry 'e' within the ACL table */
-#define ICE_ACL_TBL_TCAM_IDX(e) ((e) / ICE_AQC_ACL_TCAM_DEPTH)
+#define ICE_ACL_TBL_TCAM_IDX(e) ((u8)((e) / ICE_AQC_ACL_TCAM_DEPTH))
/* Determine the entry index within the TCAM */
-#define ICE_ACL_TBL_TCAM_ENTRY_IDX(e) ((e) % ICE_AQC_ACL_TCAM_DEPTH)
+#define ICE_ACL_TBL_TCAM_ENTRY_IDX(e) ((u16)((e) % ICE_AQC_ACL_TCAM_DEPTH))
#define ICE_ACL_SCEN_ENTRY_INVAL 0xFFFF
@@ -251,10 +251,8 @@ ice_acl_assign_act_mems_to_tcam(struct ice_acl_tbl *tbl, u8 cur_tcam,
*/
static void ice_acl_divide_act_mems_to_tcams(struct ice_acl_tbl *tbl)
{
- u16 num_cscd, stack_level, stack_idx, min_act_mem;
- u8 tcam_idx = tbl->first_tcam;
- u16 max_idx_to_get_extra;
- u8 mem_idx = 0;
+ u16 num_cscd, stack_level, stack_idx, max_idx_to_get_extra;
+ u8 min_act_mem, tcam_idx = tbl->first_tcam, mem_idx = 0;
/* Determine number of stacked TCAMs */
stack_level = DIVIDE_AND_ROUND_UP(tbl->info.depth,
@@ -326,7 +324,8 @@ ice_acl_create_tbl(struct ice_hw *hw, struct ice_acl_tbl_params *params)
depth = ICE_ALIGN(params->depth, ICE_ACL_ENTRY_ALLOC_UNIT);
if (params->entry_act_pairs < width / ICE_AQC_ACL_KEY_WIDTH_BYTES) {
- params->entry_act_pairs = width / ICE_AQC_ACL_KEY_WIDTH_BYTES;
+ params->entry_act_pairs =
+ (u8)(width / ICE_AQC_ACL_KEY_WIDTH_BYTES);
if (params->entry_act_pairs > ICE_AQC_TBL_MAX_ACTION_PAIRS)
params->entry_act_pairs = ICE_AQC_TBL_MAX_ACTION_PAIRS;
@@ -587,7 +586,7 @@ ice_acl_fill_tcam_select(struct ice_aqc_acl_scen *scen_buf,
*/
for (j = 0; j < ICE_AQC_ACL_KEY_WIDTH_BYTES; j++) {
/* PKT DIR uses the 1st location of Byte Selection Base: + 1 */
- u8 val = ICE_AQC_ACL_BYTE_SEL_BASE + 1 + idx;
+ u8 val = (u8)(ICE_AQC_ACL_BYTE_SEL_BASE + 1 + idx);
if (tcam_idx_in_cascade == cascade_cnt - 1) {
if (j == ICE_ACL_SCEN_RNG_CHK_IDX_IN_TCAM)
@@ -793,7 +792,7 @@ ice_acl_create_scen(struct ice_hw *hw, u16 match_width, u16 num_entries,
/* set the START_SET bit at the beginning of the stack */
scen_buf.tcam_cfg[k].start_cmp_set |= ICE_AQC_ACL_ALLOC_SCE_START_SET;
while (k <= last_tcam) {
- u8 last_tcam_idx_cascade = cascade_cnt + k - 1;
+ u16 last_tcam_idx_cascade = cascade_cnt + k - 1;
/* set start_cmp for the first cascaded TCAM */
scen_buf.tcam_cfg[k].start_cmp_set |=
@@ -972,10 +971,10 @@ ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen,
enum ice_acl_entry_prio prio, u8 *keys, u8 *inverts,
struct ice_acl_act_entry *acts, u8 acts_cnt, u16 *entry_idx)
{
- u8 i, entry_tcam, num_cscd, offset;
struct ice_aqc_acl_data buf;
+ u8 entry_tcam, offset;
+ u16 i, num_cscd, idx;
enum ice_status status = ICE_SUCCESS;
- u16 idx;
if (!scen)
return ICE_ERR_DOES_NOT_EXIST;
@@ -1005,7 +1004,7 @@ ice_acl_add_entry(struct ice_hw *hw, struct ice_acl_scen *scen,
* be programmed first; the TCAM entry of the leftmost TCAM
* should be programmed last.
*/
- offset = num_cscd - i - 1;
+ offset = (u8)(num_cscd - i - 1);
ice_memcpy(&buf.entry_key.val,
&keys[offset * sizeof(buf.entry_key.val)],
sizeof(buf.entry_key.val), ICE_NONDMA_TO_NONDMA);
@@ -1049,10 +1048,9 @@ ice_acl_prog_act(struct ice_hw *hw, struct ice_acl_scen *scen,
struct ice_acl_act_entry *acts, u8 acts_cnt,
u16 entry_idx)
{
- u8 entry_tcam, num_cscd, i, actx_idx = 0;
+ u16 idx, entry_tcam, num_cscd, i, actx_idx = 0;
struct ice_aqc_actpair act_buf;
enum ice_status status = ICE_SUCCESS;
- u16 idx;
if (entry_idx >= scen->num_entry)
return ICE_ERR_MAX_LIMIT;
@@ -1112,9 +1110,9 @@ ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx)
{
struct ice_aqc_actpair act_buf;
struct ice_aqc_acl_data buf;
- u8 entry_tcam, num_cscd, i;
enum ice_status status = ICE_SUCCESS;
- u16 idx;
+ u16 num_cscd, idx, i;
+ u8 entry_tcam;
if (!scen)
return ICE_ERR_DOES_NOT_EXIST;
@@ -1135,8 +1133,8 @@ ice_acl_rem_entry(struct ice_hw *hw, struct ice_acl_scen *scen, u16 entry_idx)
/* invalidate the flow entry */
ice_memset(&buf, 0, sizeof(buf), ICE_NONDMA_MEM);
for (i = 0; i < num_cscd; i++) {
- status = ice_aq_program_acl_entry(hw, entry_tcam + i, idx, &buf,
- NULL);
+ status = ice_aq_program_acl_entry(hw, (u8)(entry_tcam + i),
+ idx, &buf, NULL);
if (status)
ice_debug(hw, ICE_DBG_ACL, "AQ program ACL entry failed status: %d\n",
status);
diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index 8efbb137da..7f9bdd3cb0 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -2802,8 +2802,8 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_driver_shared_params {
u8 set_or_get_op;
#define ICE_AQC_DRIVER_PARAM_OP_MASK BIT(0)
-#define ICE_AQC_DRIVER_PARAM_SET 0
-#define ICE_AQC_DRIVER_PARAM_GET 1
+#define ICE_AQC_DRIVER_PARAM_SET ((u8)0)
+#define ICE_AQC_DRIVER_PARAM_GET ((u8)1)
u8 param_indx;
#define ICE_AQC_DRIVER_PARAM_MAX_IDX 15
u8 rsvd[2];
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index db78bf4152..f8a3017df8 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -2420,7 +2420,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
{
- u8 index = cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0;
+ u8 index = (u8)(cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0);
caps->ext_topo_dev_img_ver_high[index] = number;
caps->ext_topo_dev_img_ver_low[index] = logical_id;
@@ -2534,11 +2534,10 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
- info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
-
- if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
- info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
+ clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
+ if (clk_freq < NUM_ICE_TIME_REF_FREQ) {
+ info->time_ref = (enum ice_time_ref_freq)clk_freq;
} else {
/* Unknown clock frequency, so assume a (probably incorrect)
* default to avoid out-of-bounds look ups of frequency
@@ -5621,7 +5620,7 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
- cmd->param_indx = idx;
+ cmd->param_indx = (u8)idx;
cmd->param_val = CPU_TO_LE32(value);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
@@ -5655,7 +5654,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
- cmd->param_indx = idx;
+ cmd->param_indx = (u8)idx;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
diff --git a/drivers/net/ice/base/ice_dcb.c b/drivers/net/ice/base/ice_dcb.c
index 3d630757f8..7a850e62f4 100644
--- a/drivers/net/ice/base/ice_dcb.c
+++ b/drivers/net/ice/base/ice_dcb.c
@@ -691,9 +691,9 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop_start_specific_agent *cmd;
- enum ice_status status;
+ enum ice_adminq_opc opcode;
struct ice_aq_desc desc;
- u16 opcode;
+ enum ice_status status;
cmd = &desc.params.lldp_agent_ctrl;
@@ -885,8 +885,8 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
*/
if (!err && sync && oper) {
dcbcfg->app[app_index].priority =
- (app_prio & ice_aqc_cee_app_mask) >>
- ice_aqc_cee_app_shift;
+ (u8)((app_prio & ice_aqc_cee_app_mask) >>
+ ice_aqc_cee_app_shift);
dcbcfg->app[app_index].selector = ice_app_sel_type;
dcbcfg->app[app_index].prot_id = ice_app_prot_id_type;
app_index++;
diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index 2d95ce4d74..63ddda2df9 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -3445,7 +3445,7 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
p->type = ICE_VSIG_REM;
p->orig_vsig = vsig;
p->vsig = ICE_DEFAULT_VSIG;
- p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
+ p->vsi = (u16)(vsi_cur - hw->blk[blk].xlt2.vsis);
LIST_ADD(&p->list_entry, chg);
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
index b196e51276..80e7a447c3 100644
--- a/drivers/net/ice/base/ice_flow.c
+++ b/drivers/net/ice/base/ice_flow.c
@@ -1325,7 +1325,7 @@ ice_flow_xtract_pkt_flags(struct ice_hw *hw,
struct ice_flow_prof_params *params,
enum ice_flex_mdid_pkt_flags flags)
{
- u8 fv_words = hw->blk[params->blk].es.fvw;
+ u8 fv_words = (u8)hw->blk[params->blk].es.fvw;
u8 idx;
/* Make sure the number of extraction sequence entries required does not
@@ -1341,7 +1341,7 @@ ice_flow_xtract_pkt_flags(struct ice_hw *hw,
idx = params->es_cnt;
params->es[idx].prot_id = ICE_PROT_META_ID;
- params->es[idx].off = flags;
+ params->es[idx].off = (u16)flags;
params->es_cnt++;
return ICE_SUCCESS;
@@ -1364,8 +1364,8 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg, enum ice_flow_field fld, u64 match)
{
enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
+ u8 fv_words = (u8)hw->blk[params->blk].es.fvw;
enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
- u8 fv_words = hw->blk[params->blk].es.fvw;
struct ice_flow_fld_info *flds;
u16 cnt, ese_bits, i;
u16 sib_mask = 0;
@@ -1548,7 +1548,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
*/
ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
- flds[fld].xtrct.prot_id = prot_id;
+ flds[fld].xtrct.prot_id = (u8)prot_id;
flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
ICE_FLOW_FV_EXTRACT_SZ;
flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
@@ -1590,7 +1590,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
else
idx = params->es_cnt;
- params->es[idx].prot_id = prot_id;
+ params->es[idx].prot_id = (u8)prot_id;
params->es[idx].off = off;
params->mask[idx] = mask | sib_mask;
params->es_cnt++;
@@ -1769,10 +1769,10 @@ ice_flow_acl_def_entry_frmt(struct ice_flow_prof_params *params)
for (i = 0; i < params->prof->segs_cnt; i++) {
struct ice_flow_seg_info *seg = ¶ms->prof->segs[i];
- u8 j;
+ u16 j;
ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
- ICE_FLOW_FIELD_IDX_MAX) {
+ (u16)ICE_FLOW_FIELD_IDX_MAX) {
struct ice_flow_fld_info *fld = &seg->fields[j];
fld->entry.mask = ICE_FLOW_FLD_OFF_INVAL;
@@ -2765,7 +2765,7 @@ ice_flow_acl_check_actions(struct ice_hw *hw, struct ice_flow_action *acts,
/* If the caller want to add two actions of the same type, then
* it is considered invalid configuration.
*/
- if (ice_test_and_set_bit(acts[i].type, dup_check))
+ if (ice_test_and_set_bit((u16)acts[i].type, dup_check))
return ICE_ERR_PARAM;
}
@@ -2826,7 +2826,7 @@ ice_flow_acl_frmt_entry_range(u16 fld, struct ice_flow_fld_info *info,
(*(u16 *)(data + info->src.last)) << info->xtrct.disp;
u16 new_low =
(*(u16 *)(data + info->src.val)) << info->xtrct.disp;
- u8 range_idx = info->entry.val;
+ u8 range_idx = (u8)info->entry.val;
range_buf->checker_cfg[range_idx].low_boundary =
CPU_TO_BE16(new_low);
@@ -2983,10 +2983,10 @@ ice_flow_acl_frmt_entry(struct ice_hw *hw, struct ice_flow_prof *prof,
for (i = 0; i < prof->segs_cnt; i++) {
struct ice_flow_seg_info *seg = &prof->segs[i];
- u8 j;
+ u16 j;
ice_for_each_set_bit(j, (ice_bitmap_t *)&seg->match,
- ICE_FLOW_FIELD_IDX_MAX) {
+ (u16)ICE_FLOW_FIELD_IDX_MAX) {
struct ice_flow_fld_info *info = &seg->fields[j];
if (info->type == ICE_FLOW_FLD_TYPE_RANGE)
@@ -3753,13 +3753,13 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
{
struct ice_flow_seg_info *seg;
u64 val;
- u8 i;
+ u16 i;
/* set inner most segment */
seg = &segs[seg_cnt - 1];
ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
- ICE_FLOW_FIELD_IDX_MAX)
+ (u16)ICE_FLOW_FIELD_IDX_MAX)
ice_flow_set_fld(seg, (enum ice_flow_field)i,
ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
diff --git a/drivers/net/ice/base/ice_nvm.c b/drivers/net/ice/base/ice_nvm.c
index ad2496e873..293b71905d 100644
--- a/drivers/net/ice/base/ice_nvm.c
+++ b/drivers/net/ice/base/ice_nvm.c
@@ -171,7 +171,7 @@ ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
/* Report the number of words successfully read */
- *words = bytes / 2;
+ *words = (u16)(bytes / 2);
/* Byte swap the words up to the amount we actually read */
for (i = 0; i < *words; i++)
diff --git a/drivers/net/ice/base/ice_sched.c b/drivers/net/ice/base/ice_sched.c
index 4d31e96fd0..f87b1c4897 100644
--- a/drivers/net/ice/base/ice_sched.c
+++ b/drivers/net/ice/base/ice_sched.c
@@ -1369,9 +1369,10 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
if (status)
goto sched_query_out;
- hw->num_tx_sched_layers = LE16_TO_CPU(buf->sched_props.logical_levels);
+ hw->num_tx_sched_layers =
+ (u8)LE16_TO_CPU(buf->sched_props.logical_levels);
hw->num_tx_sched_phys_layers =
- LE16_TO_CPU(buf->sched_props.phys_levels);
+ (u8)LE16_TO_CPU(buf->sched_props.phys_levels);
hw->flattened_layers = buf->sched_props.flattening_bitmap;
hw->max_cgds = buf->sched_props.max_pf_cgds;
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index edcfa89bcb..a8f83f62ff 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -2272,8 +2272,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
~ICE_AQ_RECIPE_RESULT_EN, result_bm);
/* get the first profile that is associated with rid */
- prof = ice_find_first_bit(recipe_to_profile[idx],
- ICE_MAX_NUM_PROFILES);
+ prof = (u8)ice_find_first_bit(recipe_to_profile[idx],
+ ICE_MAX_NUM_PROFILES);
for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
@@ -4023,7 +4023,7 @@ ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
ice_aqc_opc_update_sw_rules, NULL);
if (!status) {
m_ent->lg_act_idx = l_id;
- m_ent->counter_index = counter_id;
+ m_ent->counter_index = (u8)counter_id;
}
ice_free(hw, lg_act);
@@ -6341,7 +6341,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
break;
case ICE_SW_LKUP_PROMISC:
case ICE_SW_LKUP_PROMISC_VLAN:
- ice_remove_promisc(hw, lkup, &remove_list_head);
+ ice_remove_promisc(hw, (u8)lkup, &remove_list_head);
break;
case ICE_SW_LKUP_MAC_VLAN:
ice_remove_mac_vlan(hw, &remove_list_head);
@@ -7183,7 +7183,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
/* Allocate the recipe resources, and configure them according to the
* match fields from protocol headers and extracted field vectors.
*/
- chain_idx = ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
+ chain_idx = (u8)ice_find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
u8 i;
@@ -7376,7 +7376,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
is_root = (rm->root_rid == entry->rid);
recp->is_root = is_root;
- recp->root_rid = entry->rid;
+ recp->root_rid = (u8)entry->rid;
recp->big_recp = (is_root && rm->n_grp_count > 1);
ice_memcpy(&recp->ext_words, entry->r_group.pairs,
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 40/70] net/ice/base: add helper function to check if device is E823
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (38 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 39/70] net/ice/base: add data typecasting to match sizes Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 41/70] net/ice/base: add low latency Tx timestamp read Qi Zhang
` (30 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Karol Kolacinski
Add a simple function checking if the device is E823-L or E823-C
based.
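A hypothetical caller (not part of this patch) would use it like the
existing ice_is_e810t() helper:

    if (ice_is_e823(hw)) {
            /* apply E823-L/E823-C specific handling */
    }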
Signed-off-by: Karol Kolacinski <karol.kolacinski@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_common.c | 25 +++++++++++++++++++++++++
drivers/net/ice/base/ice_common.h | 1 +
2 files changed, 26 insertions(+)
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index f8a3017df8..c90ae20c43 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -231,6 +231,31 @@ bool ice_is_e810t(struct ice_hw *hw)
return false;
}
+/**
+ * ice_is_e823
+ * @hw: pointer to the hardware structure
+ *
+ * returns true if the device is E823-L or E823-C based, false if not.
+ */
+bool ice_is_e823(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index 1051cc1176..b15cf240f9 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -259,6 +259,7 @@ void ice_print_rollback_msg(struct ice_hw *hw);
bool ice_is_generic_mac(struct ice_hw *hw);
bool ice_is_e810(struct ice_hw *hw);
bool ice_is_e810t(struct ice_hw *hw);
+bool ice_is_e823(struct ice_hw *hw);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 41/70] net/ice/base: add low latency Tx timestamp read
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (39 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 40/70] net/ice/base: add helper function to check if device is E823 Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 42/70] net/ice/base: fix double VLAN error in promisc mode Qi Zhang
` (29 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Karol Kolacinski
E810 products can support a low latency Tx timestamp register read.
Add a check for the device capability and use the new method if it
is supported.
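As a worked example of the new path (values follow the TS_LL_READ_*
macros added below): to read timestamp index 5, the driver writes
0x85000000 (BIT(31) | (5 << 24)) to PF_SB_ATQBAL, polls until the
firmware clears bit 31, then takes the upper 8 timestamp bits from
bits 23:16 of PF_SB_ATQBAL and the lower 32 bits from PF_SB_ATQBAH.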
Signed-off-by: Karol Kolacinski <karol.kolacinski@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_common.c | 7 ++-
drivers/net/ice/base/ice_ptp_hw.c | 95 +++++++++++++++++++++++++++----
drivers/net/ice/base/ice_ptp_hw.h | 12 +++-
drivers/net/ice/base/ice_type.h | 2 +
4 files changed, 101 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index c90ae20c43..2014f8361d 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -2757,7 +2757,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
- info->ena_ports = logical_id;
+ info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
+
info->tmr_own_map = phys_id;
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
@@ -2774,8 +2775,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr1_owned);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
info->tmr1_ena);
- ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
- info->ena_ports);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
+ info->ts_ll_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
info->tmr_own_map);
}
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index 7ed420be8e..712b7dedfb 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -4142,38 +4142,111 @@ ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
}
/**
- * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
+ * ice_read_phy_tstamp_ll_e810 - Read a PHY timestamp registers through the FW
+ * @hw: pointer to the HW struct
+ * @idx: the timestamp index to read
+ * @hi: 8 bit timestamp high value
+ * @lo: 32 bit timestamp low value
+ *
+ * Read a 8bit timestamp high value and 32 bit timestamp low value out of the
+ * timestamp block of the external PHY on the E810 device using the low latency
+ * timestamp read.
+ */
+static enum ice_status
+ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
+{
+ u8 i;
+
+ /* Write TS index to read to the PF register so the FW can read it */
+ wr32(hw, PF_SB_ATQBAL, TS_LL_READ_TS_IDX(idx));
+
+ /* Read the register repeatedly until the FW provides us the TS */
+ for (i = TS_LL_READ_RETRIES; i > 0; i--) {
+ u32 val = rd32(hw, PF_SB_ATQBAL);
+
+ /* When the bit is cleared, the TS is ready in the register */
+ if (!(val & TS_LL_READ_TS)) {
+ /* High 8 bit value of the TS is on the bits 16:23 */
+ *hi = (u8)(val >> TS_LL_READ_TS_HIGH_S);
+
+ /* Read the low 32 bit value and set the TS valid bit */
+ *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
+ return ICE_SUCCESS;
+ }
+
+ ice_usec_delay(10, false);
+ }
+
+ /* FW failed to provide the TS in time */
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
+ return ICE_ERR_NOT_READY;
+}
+
+/**
+ * ice_read_phy_tstamp_sbq_e810 - Read a PHY timestamp registers through the sbq
* @hw: pointer to the HW struct
* @lport: the lport to read from
* @idx: the timestamp index to read
- * @tstamp: on return, the 40bit timestamp value
+ * @hi: 8 bit timestamp high value
+ * @lo: 32 bit timestamp low value
*
- * Read a 40bit timestamp value out of the timestamp block of the external PHY
- * on the E810 device.
+ * Read a 8bit timestamp high value and 32 bit timestamp low value out of the
+ * timestamp block of the external PHY on the E810 device using sideband queue.
*/
static enum ice_status
-ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
+ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
+ u32 *lo)
{
+ u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
+ u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
enum ice_status status;
- u32 lo_addr, hi_addr, lo, hi;
-
- lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
- hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
+ u32 lo_val, hi_val;
- status = ice_read_phy_reg_e810(hw, lo_addr, &lo);
+ status = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
if (status) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n",
status);
return status;
}
- status = ice_read_phy_reg_e810(hw, hi_addr, &hi);
+ status = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
if (status) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n",
status);
return status;
}
+ *lo = lo_val;
+ *hi = (u8)hi_val;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
+ * @hw: pointer to the HW struct
+ * @lport: the lport to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block of the external PHY
+ * on the E810 device.
+ */
+static enum ice_status
+ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
+{
+ enum ice_status status;
+ u32 lo = 0;
+ u8 hi = 0;
+
+ if (hw->dev_caps.ts_dev_info.ts_ll_read)
+ status = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
+ else
+ status = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
+
+ if (status)
+ return status;
+
/* For E810 devices, the timestamp is reported with the lower 32 bits
* in the low register, and the upper 8 bits in the high register.
*/
diff --git a/drivers/net/ice/base/ice_ptp_hw.h b/drivers/net/ice/base/ice_ptp_hw.h
index 1e016ef177..9fa17787df 100644
--- a/drivers/net/ice/base/ice_ptp_hw.h
+++ b/drivers/net/ice/base/ice_ptp_hw.h
@@ -476,8 +476,8 @@ enum ice_status ice_ptp_init_phy_cfg(struct ice_hw *hw);
#define INCVAL_HIGH_M 0xFF
/* Timestamp block macros */
+#define TS_VALID BIT(0)
#define TS_LOW_M 0xFFFFFFFF
-#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
#define TS_PHY_LOW_M 0xFF
@@ -487,6 +487,16 @@ enum ice_status ice_ptp_init_phy_cfg(struct ice_hw *hw);
#define BYTES_PER_IDX_ADDR_L_U 8
#define BYTES_PER_IDX_ADDR_L 4
+/* Tx timestamp low latency read definitions */
+#define TS_LL_READ_RETRIES 200
+#define TS_LL_READ_TS BIT(31)
+#define TS_LL_READ_TS_IDX_S 24
+#define TS_LL_READ_TS_IDX_M MAKEMASK(0x3F, 0)
+#define TS_LL_READ_TS_IDX(__idx) (TS_LL_READ_TS | \
+ (((__idx) & TS_LL_READ_TS_IDX_M) << \
+ TS_LL_READ_TS_IDX_S))
+#define TS_LL_READ_TS_HIGH_S 16
+
/* Internal PHY timestamp address */
#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
#define TS_H(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U + \
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index 5c7cc06e0c..cdfef47e94 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -651,6 +651,7 @@ struct ice_ts_func_info {
#define ICE_TS_DEV_ENA_M BIT(24)
#define ICE_TS_TMR0_ENA_M BIT(25)
#define ICE_TS_TMR1_ENA_M BIT(26)
+#define ICE_TS_LL_TX_TS_READ_M BIT(28)
struct ice_ts_dev_info {
/* Device specific info */
@@ -663,6 +664,7 @@ struct ice_ts_dev_info {
u8 ena;
u8 tmr0_ena;
u8 tmr1_ena;
+ u8 ts_ll_read;
};
/* Function specific capabilities */
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 42/70] net/ice/base: fix double VLAN error in promisc mode
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (40 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 41/70] net/ice/base: add low latency Tx timestamp read Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 43/70] net/ice/base: move functions Qi Zhang
` (28 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, stable, Grzegorz Siwik
Avoid enabling or disabling VLAN 0 when trying to set promiscuous
VLAN mode if double VLAN mode is enabled. This fix is needed
because the driver tries to add the VLAN 0 filter twice (once for
the inner and once for the outer VLAN) when double VLAN mode is
enabled. The filter programming is rejected by the firmware when
double VLAN is enabled, because the promiscuous filter only needs
to be set once. This issue was missed in the initial implementation
of double VLAN mode.
Fixes: 60ff6f5ce2d8 ("net/ice/base: consolidate VF promiscuous mode")
Cc: stable@dpdk.org
Signed-off-by: Grzegorz Siwik <grzegorz.siwik@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_switch.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index a8f83f62ff..6a94e3fde9 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -6263,6 +6263,13 @@ _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
list_entry) {
+ /* Avoid enabling or disabling vlan zero twice when in double
+ * vlan mode
+ */
+ if (ice_is_dvm_ena(hw) &&
+ list_itr->fltr_info.l_data.vlan.tpid == 0)
+ continue;
+
vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
if (rm_vlan_promisc)
status = _ice_clear_vsi_promisc(hw, vsi_handle,
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 43/70] net/ice/base: move functions
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (41 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 42/70] net/ice/base: fix double VLAN error in promisc mode Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 44/70] net/ice/base: complete support for Tx balancing Qi Zhang
` (27 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Jacob Keller
Move the functions ice_ptp_set_vernier_wl and ice_ptp_src_cmd to
align with the kernel driver.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_ptp_hw.c | 130 +++++++++++++++---------------
1 file changed, 66 insertions(+), 64 deletions(-)
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index 712b7dedfb..dfb9d08224 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -381,6 +381,47 @@ static enum ice_status ice_init_cgu_e822(struct ice_hw *hw)
return ICE_SUCCESS;
}
+/**
+ * ice_ptp_src_cmd - Prepare source timer for a timer command
+ * @hw: pointer to HW structure
+ * @cmd: Timer command
+ *
+ * Prepare the source timer for an upcoming timer sync command.
+ */
+void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+ cmd_val = tmr_idx << SEL_CPK_SRC;
+
+ switch (cmd) {
+ case ICE_PTP_INIT_TIME:
+ cmd_val |= GLTSYN_CMD_INIT_TIME;
+ break;
+ case ICE_PTP_INIT_INCVAL:
+ cmd_val |= GLTSYN_CMD_INIT_INCVAL;
+ break;
+ case ICE_PTP_ADJ_TIME:
+ cmd_val |= GLTSYN_CMD_ADJ_TIME;
+ break;
+ case ICE_PTP_ADJ_TIME_AT_TIME:
+ cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
+ break;
+ case ICE_PTP_READ_TIME:
+ cmd_val |= GLTSYN_CMD_READ_TIME;
+ break;
+ case ICE_PTP_NOP:
+ break;
+ default:
+ ice_warn(hw, "Unknown timer command %u\n", cmd);
+ return;
+ }
+
+ wr32(hw, GLTSYN_CMD, cmd_val);
+}
+
/**
* ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
* @hw: pointer to HW struct
@@ -2365,6 +2406,31 @@ ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
return ICE_SUCCESS;
}
+/**
+ * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
+ * @hw: pointer to the HW struct
+ *
+ * Set the window length used for the vernier port calibration process.
+ */
+enum ice_status ice_ptp_set_vernier_wl(struct ice_hw *hw)
+{
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ enum ice_status status;
+
+ status = ice_write_phy_reg_e822_lp(hw, port, P_REG_WL,
+ PTP_VERNIER_WL, true);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, status %d\n",
+ port, status);
+ return status;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
/**
* ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
* @hw: pointer to HW struct
@@ -2817,31 +2883,6 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd,
* port.
*/
-/**
- * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
- * @hw: pointer to the HW struct
- *
- * Set the window length used for the vernier port calibration process.
- */
-enum ice_status ice_ptp_set_vernier_wl(struct ice_hw *hw)
-{
- u8 port;
-
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
- enum ice_status status;
-
- status = ice_write_phy_reg_e822_lp(hw, port, P_REG_WL,
- PTP_VERNIER_WL, true);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, status %d\n",
- port, status);
- return status;
- }
- }
-
- return ICE_SUCCESS;
-}
-
/**
* ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
* @hw: pointer to HW struct
@@ -4829,45 +4870,6 @@ void ice_ptp_unlock(struct ice_hw *hw)
wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0);
}
-/**
- * ice_ptp_src_cmd - Prepare source timer for a timer command
- * @hw: pointer to HW structure
- * @cmd: Timer command
- *
- * Prepare the source timer for an upcoming timer sync command.
- */
-void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
-{
- u32 cmd_val;
- u8 tmr_idx;
-
- tmr_idx = ice_get_ptp_src_clock_index(hw);
- cmd_val = tmr_idx << SEL_CPK_SRC;
-
- switch (cmd) {
- case ICE_PTP_INIT_TIME:
- cmd_val |= GLTSYN_CMD_INIT_TIME;
- break;
- case ICE_PTP_INIT_INCVAL:
- cmd_val |= GLTSYN_CMD_INIT_INCVAL;
- break;
- case ICE_PTP_ADJ_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_TIME;
- break;
- case ICE_PTP_ADJ_TIME_AT_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
- break;
- case ICE_PTP_READ_TIME:
- cmd_val |= GLTSYN_CMD_READ_TIME;
- break;
- default:
- ice_warn(hw, "Unknown timer command %u\n", cmd);
- return;
- }
-
- wr32(hw, GLTSYN_CMD, cmd_val);
-}
-
/**
* ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
* @hw: pointer to HW struct
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 44/70] net/ice/base: complete support for Tx balancing
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (42 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 43/70] net/ice/base: move functions Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 45/70] net/ice/base: update definitions for AQ internal debug dump Qi Zhang
` (26 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Lukasz Czapnik
Add the module ID and the struct necessary to read and save the Tx
Scheduler Topology Tree User Selection data from the PFA TLV.
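A minimal sketch of how the new struct might be consumed once the TLV
has been read from the PFA (the read itself, e.g. through the NVM admin
queue helpers, is assumed and not shown here):

    struct ice_aqc_nvm_tx_topo_user_sel usr_sel = { 0 };
    bool user_sel_set;

    /* ... fill usr_sel from the ICE_AQC_NVM_TX_TOPO_MOD_ID module ... */
    user_sel_set = !!(usr_sel.data & ICE_AQC_NVM_TX_TOPO_USER_SEL);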
Signed-off-by: Lukasz Czapnik <lukasz.czapnik@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_adminq_cmd.h | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index 7f9bdd3cb0..ebffee1b93 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -1936,6 +1936,15 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_LLDP_STATUS_M_LEN 4 /* In Bits */
#define ICE_AQC_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */
+#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B
+
+struct ice_aqc_nvm_tx_topo_user_sel {
+ __le16 length;
+ u8 data;
+#define ICE_AQC_NVM_TX_TOPO_USER_SEL BIT(4)
+ u8 reserved;
+};
+
/* Used for 0x0704 as well as for 0x0705 commands */
struct ice_aqc_nvm_cfg {
u8 cmd_flags;
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 45/70] net/ice/base: update definitions for AQ internal debug dump
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (43 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 44/70] net/ice/base: complete support for Tx balancing Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 46/70] net/ice/base: update macros of L2TPv2 ptype value Qi Zhang
` (25 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Dawid Zielinski
Add defines for Queue Mng and Full CSR Space in the debug dump
internal data. These defines are used by Lanconf for the debug dump.
Also add the QV_SUPPORT macro to the ifdef for ACL.
Signed-off-by: Dawid Zielinski <dawid.zielinski@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_adminq_cmd.h | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index ebffee1b93..6a1b8a40f2 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -2841,17 +2841,19 @@ struct ice_aqc_event_lan_overflow {
/* Debug Dump Internal Data (indirect 0xFF08) */
struct ice_aqc_debug_dump_internals {
u8 cluster_id;
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW 0
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_ACL 1
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED 2
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES 3
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW 0
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_ACL 1
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED 2
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES 3
/* EMP_DRAM only dumpable in device debug mode */
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_EMP_DRAM 4
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK 5
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_EMP_DRAM 4
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK 5
/* AUX_REGS only dumpable in device debug mode */
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_AUX_REGS 6
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB 7
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P 8
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_AUX_REGS 6
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB 7
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P 8
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG 9
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE 21
u8 reserved;
__le16 table_id; /* Used only for non-memory clusters */
__le32 idx; /* In table entries for tables, in bytes for memory */
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 46/70] net/ice/base: update macros of L2TPv2 ptype value
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (44 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 45/70] net/ice/base: update definitions for AQ internal debug dump Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 47/70] net/ice/base: refine header file include Qi Zhang
` (24 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Jie Wang
The macros for the L2TPv2 packet type values were changed in
ice_ppp-o-l2tpv2-o-udp-1.3.4.0.pkg, so update the L2TPv2 packet type
value macros and the packet type bitmaps for the relevant protocol
headers to match the new DDP package.
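As a quick sanity check on the bitmap hunks below (assuming each u32 in
the ice_ptypes_* arrays covers 32 consecutive ptypes, least significant
bit first):

    old: ICE_MAC_IPV4_L2TPV2_CONTROL = 392 -> dword 392/32 = 12, bit 8
    new: ICE_MAC_IPV4_L2TPV2_CONTROL = 396 -> dword 12,          bit 12

The 30 L2TPv2/PPPoL2TPv2 ptypes therefore move from bits 8..31 of dword
12 plus bits 0..5 of dword 13 (0xFFFFFF00, 0x0000003F) to bits 12..31
of dword 12 plus bits 0..9 of dword 13 (0xFFFFF000, 0x000003FF), which
matches the ice_ptypes_mac_ofos and ice_ptypes_l2tpv2 changes.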
Signed-off-by: Jie Wang <jie1x.wang@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_flex_type.h | 60 ++++++++++++++--------------
drivers/net/ice/base/ice_flow.c | 34 ++++++++--------
2 files changed, 47 insertions(+), 47 deletions(-)
diff --git a/drivers/net/ice/base/ice_flex_type.h b/drivers/net/ice/base/ice_flex_type.h
index d45653b637..2855d67831 100644
--- a/drivers/net/ice/base/ice_flex_type.h
+++ b/drivers/net/ice/base/ice_flex_type.h
@@ -174,36 +174,36 @@ struct ice_fv {
#define ICE_MAC_IPV6_PFCP_SESSION 354
#define ICE_MAC_IPV4_L2TPV3 360
#define ICE_MAC_IPV6_L2TPV3 361
-#define ICE_MAC_IPV4_L2TPV2_CONTROL 392
-#define ICE_MAC_IPV6_L2TPV2_CONTROL 393
-#define ICE_MAC_IPV4_L2TPV2 394
-#define ICE_MAC_IPV6_L2TPV2 395
-#define ICE_MAC_IPV4_PPPOL2TPV2 396
-#define ICE_MAC_IPV6_PPPOL2TPV2 397
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_FRAG 398
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_PAY 399
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_UDP_PAY 400
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_TCP 401
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_SCTP 402
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_ICMP 403
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_FRAG 404
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_PAY 405
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_UDP_PAY 406
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_TCP 407
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_SCTP 408
-#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_ICMPV6 409
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_FRAG 410
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_PAY 411
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_UDP_PAY 412
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_TCP 413
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_SCTP 414
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_ICMP 415
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_FRAG 416
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_PAY 417
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_UDP_PAY 418
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_TCP 419
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_SCTP 420
-#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_ICMPV6 421
+#define ICE_MAC_IPV4_L2TPV2_CONTROL 396
+#define ICE_MAC_IPV6_L2TPV2_CONTROL 397
+#define ICE_MAC_IPV4_L2TPV2 398
+#define ICE_MAC_IPV6_L2TPV2 399
+#define ICE_MAC_IPV4_PPPOL2TPV2 400
+#define ICE_MAC_IPV6_PPPOL2TPV2 401
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_FRAG 402
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_PAY 403
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_UDP_PAY 404
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_TCP 405
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_SCTP 406
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV4_ICMP 407
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_FRAG 408
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_PAY 409
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_UDP_PAY 410
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_TCP 411
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_SCTP 412
+#define ICE_MAC_IPV4_PPPOL2TPV2_IPV6_ICMPV6 413
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_FRAG 414
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_PAY 415
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_UDP_PAY 416
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_TCP 417
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_SCTP 418
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV4_ICMP 419
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_FRAG 420
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_PAY 421
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_UDP_PAY 422
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_TCP 423
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_SCTP 424
+#define ICE_MAC_IPV6_PPPOL2TPV2_IPV6_ICMPV6 425
#define MAC_IPV4_TUN_IPV4_GTPU_IPV4_FRAG 450
#define MAC_IPV4_TUN_IPV4_GTPU_IPV4_PAY 451
#define MAC_IPV4_TUN_IPV4_GTPU_IPV4_UDP_PAY 452
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
index 80e7a447c3..bdc51ca9d2 100644
--- a/drivers/net/ice/base/ice_flow.c
+++ b/drivers/net/ice/base/ice_flow.c
@@ -239,7 +239,7 @@ static const u32 ice_ptypes_mac_ofos[] = {
0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00100707,
- 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
+ 0xFFFFF000, 0x000003FF, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -265,7 +265,7 @@ static const u32 ice_ptypes_ipv4_ofos[] = {
0x1D800000, 0xBFBF7800, 0x000001DF, 0x00000000,
0x00000000, 0x00000155, 0x00000000, 0x00000000,
0x00000000, 0x000FC000, 0x000002A0, 0x00100000,
- 0x00001500, 0x00000000, 0x00000000, 0x00000000,
+ 0x00015000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -279,7 +279,7 @@ static const u32 ice_ptypes_ipv4_ofos_all[] = {
0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
0x00000000, 0x00000155, 0x00000000, 0x00000000,
0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
- 0x03FFD000, 0x00000000, 0x02FBEFBC, 0x00000000,
+ 0x3FFD5000, 0x00000000, 0x02FBEFBC, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -291,7 +291,7 @@ static const u32 ice_ptypes_ipv4_il[] = {
0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
0x0000000E, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x001FF800, 0x00100000,
- 0xFC0FC000, 0x00000000, 0xBC0BC0BC, 0x00000BC0,
+ 0xC0FC0000, 0x0000000F, 0xBC0BC0BC, 0x00000BC0,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -305,7 +305,7 @@ static const u32 ice_ptypes_ipv6_ofos[] = {
0x00000000, 0x00000000, 0x76000000, 0x10002000,
0x00000000, 0x000002AA, 0x00000000, 0x00000000,
0x00000000, 0x03F00000, 0x00000540, 0x00000000,
- 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
+ 0x0002A000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -319,7 +319,7 @@ static const u32 ice_ptypes_ipv6_ofos_all[] = {
0x00000000, 0x00000000, 0x76000000, 0xFEFDE000,
0x0000077E, 0x000002AA, 0x00000000, 0x00000000,
0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
- 0xFC002000, 0x0000003F, 0xBC000000, 0x0002FBEF,
+ 0xC002A000, 0x000003FF, 0xBC000000, 0x0002FBEF,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -331,7 +331,7 @@ static const u32 ice_ptypes_ipv6_il[] = {
0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
0x00000770, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
- 0x03F00000, 0x0000003F, 0x02F02F00, 0x0002F02F,
+ 0x3F000000, 0x000003F0, 0x02F02F00, 0x0002F02F,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -345,7 +345,7 @@ static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
0x10800000, 0x04000800, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x000cc000, 0x000002A0, 0x00000000,
- 0x00001500, 0x00000000, 0x00000000, 0x00000000,
+ 0x00015000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -357,7 +357,7 @@ static const u32 ice_ptypes_ipv4_il_no_l4[] = {
0x60000000, 0x18043008, 0x80000002, 0x6010c021,
0x00000008, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00139800, 0x00000000,
- 0x8C08C000, 0x00000000, 0x00000000, 0x00000000,
+ 0xC08C0000, 0x00000008, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -371,7 +371,7 @@ static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
0x00000000, 0x00000000, 0x42000000, 0x10002000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x02300000, 0x00000540, 0x00000000,
- 0x00002A00, 0x00000000, 0x00000000, 0x00000000,
+ 0x0002A000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -383,7 +383,7 @@ static const u32 ice_ptypes_ipv6_il_no_l4[] = {
0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
0x00000430, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x4e600000, 0x00000000,
- 0x02300000, 0x00000023, 0x00000000, 0x00000000,
+ 0x23000000, 0x00000230, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -409,7 +409,7 @@ static const u32 ice_ptypes_udp_il[] = {
0x81000000, 0x20204040, 0x04000010, 0x80810102,
0x00000040, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00410000, 0x908427E0, 0x00100007,
- 0x10410000, 0x00000004, 0x10410410, 0x00004104,
+ 0x0413F000, 0x00000041, 0x10410410, 0x00004104,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -421,7 +421,7 @@ static const u32 ice_ptypes_tcp_il[] = {
0x04000000, 0x80810102, 0x10000040, 0x02040408,
0x00000102, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00820000, 0x21084000, 0x00000000,
- 0x20820000, 0x00000008, 0x20820820, 0x00008208,
+ 0x08200000, 0x00000082, 0x20820820, 0x00008208,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -433,7 +433,7 @@ static const u32 ice_ptypes_sctp_il[] = {
0x08000000, 0x01020204, 0x20000081, 0x04080810,
0x00000204, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x01040000, 0x00000000, 0x00000000,
- 0x41040000, 0x00000010, 0x00000000, 0x00000000,
+ 0x10400000, 0x00000104, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -457,7 +457,7 @@ static const u32 ice_ptypes_icmp_il[] = {
0x00000000, 0x02040408, 0x40000102, 0x08101020,
0x00000408, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x42108000, 0x00000000,
- 0x82080000, 0x00000020, 0x00000000, 0x00000000,
+ 0x20800000, 0x00000208, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -920,7 +920,7 @@ static const u32 ice_ptypes_l2tpv2[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0xFFFFFF00, 0x0000003F, 0x00000000, 0x00000000,
+ 0xFFFFF000, 0x000003FF, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -931,7 +931,7 @@ static const u32 ice_ptypes_ppp[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0xFFFFF000, 0x0000003F, 0x00000000, 0x00000000,
+ 0xFFFF0000, 0x000003FF, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
--
2.31.1
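A note on reading the hunks above: each ice_ptypes_* array is a flat packet-type bitmap stored as 32-bit words, so packet type N is bit (N % 32) of word (N / 32). The helper below is an illustrative sketch (not a driver function) showing how such a table is queried, together with one worked example of a changed word:
/* In ice_ptypes_ipv4_ofos the word at index 12 changes from 0x00001500
 * (bits 8, 10 and 12 set) to 0x00015000 (bits 12, 14 and 16 set), i.e. the
 * marked packet types move up by 4, consistent with the L2TPv2 ptype
 * renumbering this patch applies.
 */
static inline int ice_ptype_is_set(const u32 *tbl, unsigned int ptype)
{
	return (tbl[ptype / 32] >> (ptype % 32)) & 1;
}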
* [PATCH v2 47/70] net/ice/base: refine header file include
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (45 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 46/70] net/ice/base: update macros of L2TPv2 ptype value Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 48/70] net/ice/base: ignore already exist error Qi Zhang
` (23 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Jacob Keller
The ice_switch.h and ice_fdir.h headers include ice_common.h, and both
are themselves included by ice_common.h. This creates a circular include
dependency, so the order in which declarations become visible depends on
which header happens to be processed first.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_fdir.h | 2 +-
drivers/net/ice/base/ice_switch.c | 1 +
drivers/net/ice/base/ice_switch.h | 2 +-
3 files changed, 3 insertions(+), 2 deletions(-)
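As a sketch of the general fix pattern (the file and symbol names below are hypothetical; only the ice_* header names in the diff are real): leaf headers depend only on a shared types header, and the translation unit includes the aggregate header explicitly, so the cycle, and with it any dependence on include order, disappears.
/* Acyclic shape after the change:
 *
 *   ice_type.h  <--  ice_fdir.h, ice_switch.h  <--  ice_common.h
 *
 * with ice_switch.c including ice_common.h directly for what it needs.
 */
#ifndef FEATURE_H			/* hypothetical leaf header */
#define FEATURE_H
#include "types.h"			/* shared type definitions only */
int feature_init(struct hw *hw);
#endif /* FEATURE_H */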
diff --git a/drivers/net/ice/base/ice_fdir.h b/drivers/net/ice/base/ice_fdir.h
index 008636072a..d57b1daecd 100644
--- a/drivers/net/ice/base/ice_fdir.h
+++ b/drivers/net/ice/base/ice_fdir.h
@@ -5,7 +5,7 @@
#ifndef _ICE_FDIR_H_
#define _ICE_FDIR_H_
-#include "ice_common.h"
+#include "ice_type.h"
#define ICE_FDIR_IP_PROTOCOLS
#define ICE_IP_PROTO_TCP 6
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index 6a94e3fde9..6863696d9d 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -2,6 +2,7 @@
* Copyright(c) 2001-2021 Intel Corporation
*/
+#include "ice_common.h"
#include "ice_switch.h"
#include "ice_flex_type.h"
#include "ice_flow.h"
diff --git a/drivers/net/ice/base/ice_switch.h b/drivers/net/ice/base/ice_switch.h
index ad1397ba5a..3c05a1531f 100644
--- a/drivers/net/ice/base/ice_switch.h
+++ b/drivers/net/ice/base/ice_switch.h
@@ -5,7 +5,7 @@
#ifndef _ICE_SWITCH_H_
#define _ICE_SWITCH_H_
-#include "ice_common.h"
+#include "ice_type.h"
#include "ice_protocol_type.h"
#define ICE_SW_CFG_MAX_BUF_LEN 2048
--
2.31.1
* [PATCH v2 48/70] net/ice/base: ignore already exist error
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (46 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 47/70] net/ice/base: refine header file include Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 49/70] net/ice/base: clean up with no lookups Qi Zhang
` (22 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, stable, Grzegorz Siwik
Ignore the ERR_ALREADY_EXISTS error when setting promiscuous mode.
This fix is needed because the driver may try to set promiscuous mode
again before the previous setting has been cleared properly.
Promiscuous mode can be set only once, so setting it a second time
is rejected.
Fixes: 60ff6f5ce2d8 ("net/ice/base: consolidate VF promiscuous mode")
Cc: stable@dpdk.org
Signed-off-by: Grzegorz Siwik <grzegorz.siwik@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_switch.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
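Shown as a minimal sketch of the resulting idiom (the wrapper name is hypothetical and the parameter types are approximated; the callee, argument names and status codes come from the loop changed below): a repeated request for a setting that is already programmed comes back as ICE_ERR_ALREADY_EXISTS and can be folded into success.
static enum ice_status
set_vsi_promisc_idempotent(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			   u16 vlan_id, u8 lport, struct ice_switch_info *sw)
{
	enum ice_status status;

	status = _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask,
				      vlan_id, lport, sw);
	if (status == ICE_ERR_ALREADY_EXISTS)
		return ICE_SUCCESS;	/* rule already in place, nothing to do */
	return status;
}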
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index 6863696d9d..91a959e10f 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -6280,7 +6280,7 @@ _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
status = _ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id,
lport, sw);
- if (status)
+ if (status && status != ICE_ERR_ALREADY_EXISTS)
break;
}
--
2.31.1
* [PATCH v2 49/70] net/ice/base: clean up with no lookups
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (47 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 48/70] net/ice/base: ignore already exist error Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 50/70] net/ice/base: add support for Auto FEC with FEC disabled Qi Zhang
` (21 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Jesse Brandeburg
The add rule functionality works fine with a NULL lookups parameter.
However, when running with the undefined behavior sanitizer, it was
noticed that the function could trigger a memcpy from a NULL pointer.
Fix the code to handle a NULL lkups pointer and a zero lkups_cnt count
more explicitly, and clean up the test to pass NULL directly instead of
allocating a stack variable assigned to NULL and passing that as a
pointer.
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_switch.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
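The rule being tripped is worth spelling out with plain libc (an illustrative snippet, not driver code): memcpy() requires valid pointer arguments even when the length is zero, so a zero-length copy from a NULL source is still undefined behavior, which is exactly what UBSan reports and what the explicit lkups_cnt guard below avoids.
#include <string.h>

static void copy_maybe_empty(void *dst, const void *src, size_t n)
{
	/* C11 7.1.4 / 7.24.1p2: the pointers must be valid even for n == 0,
	 * so guard the call instead of relying on the zero length.
	 */
	if (n)
		memcpy(dst, src, n);
}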
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index 91a959e10f..01441211ff 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -9002,9 +9002,13 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
goto err_ice_add_adv_rule;
}
- adv_fltr->lkups = (struct ice_adv_lkup_elem *)
- ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
- ICE_NONDMA_TO_NONDMA);
+ if (lkups_cnt) {
+ adv_fltr->lkups = (struct ice_adv_lkup_elem *)
+ ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
+ ICE_NONDMA_TO_NONDMA);
+ } else {
+ adv_fltr->lkups = NULL;
+ }
if (!adv_fltr->lkups && !prof_rule) {
status = ICE_ERR_NO_MEMORY;
goto err_ice_add_adv_rule;
--
2.31.1
* [PATCH v2 50/70] net/ice/base: add support for Auto FEC with FEC disabled
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (48 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 49/70] net/ice/base: clean up with no lookups Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 51/70] net/ice/base: update PHY type high max index Qi Zhang
` (20 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Paul Greenwalt
The default Link Establishment State Machine (LESM) behavior does not
allow the use of FEC disable mode if the media does not support having
FEC disabled. However, users may want to override this behavior.
Add support for setting Auto FEC with FEC disabled.
Signed-off-by: Paul Greenwalt <paul.greenwalt@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_adminq_cmd.h | 2 +
drivers/net/ice/base/ice_common.c | 129 +++++++++++++++++++-------
drivers/net/ice/base/ice_common.h | 2 +
drivers/net/ice/base/ice_type.h | 12 ++-
4 files changed, 111 insertions(+), 34 deletions(-)
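A hedged sketch of how a caller would request the new mode (ice_cfg_phy_fec() and the support check come from the diff below; the setup of pi, cfg and hw is assumed):
enum ice_status status;

/* ICE_FEC_DIS_AUTO keeps FEC auto-negotiation advertised but also offers
 * the "no FEC" option; ice_cfg_phy_fec() rejects it with
 * ICE_ERR_NOT_SUPPORTED unless ice_fw_supports_fec_dis_auto() holds
 * (firmware branch 1 at version 7.0.5 or newer, or a later branch).
 */
status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_DIS_AUTO);
if (status)
	return status;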
diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index 6a1b8a40f2..dc72d70dfe 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -1439,6 +1439,7 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_PHY_FEC_25G_RS_528_REQ BIT(2)
#define ICE_AQC_PHY_FEC_25G_KR_REQ BIT(3)
#define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define ICE_AQC_PHY_FEC_DIS BIT(5)
#define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
#define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
#define ICE_AQC_PHY_FEC_MASK MAKEMASK(0xdf, 0)
@@ -3275,6 +3276,7 @@ enum ice_adminq_opc {
ice_aqc_opc_lldp_set_local_mib = 0x0A08,
ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
ice_aqc_opc_lldp_filter_ctrl = 0x0A0A,
+ ice_execute_pending_lldp_mib = 0x0A0B,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index 2014f8361d..9a41f36fed 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -3494,8 +3494,12 @@ enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
*/
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
{
- if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
- return ICE_FEC_AUTO;
+ if (caps & ICE_AQC_PHY_EN_AUTO_FEC) {
+ if (fec_options & ICE_AQC_PHY_FEC_DIS)
+ return ICE_FEC_DIS_AUTO;
+ else
+ return ICE_FEC_AUTO;
+ }
if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
@@ -3788,6 +3792,12 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
/* Clear all FEC option bits. */
cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
break;
+ case ICE_FEC_DIS_AUTO:
+ /* Set No FEC and auto FEC */
+ if (!ice_fw_supports_fec_dis_auto(hw))
+ return ICE_ERR_NOT_SUPPORTED;
+ cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS;
+ /* fall-through */
case ICE_FEC_AUTO:
/* AND auto FEC bit, and all caps bits. */
cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
@@ -5750,26 +5760,70 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
}
/**
- * ice_fw_supports_link_override
+ * ice_is_fw_api_min_ver
* @hw: pointer to the hardware structure
+ * @maj: major version
+ * @min: minor version
+ * @patch: patch version
*
- * Checks if the firmware supports link override
+ * Checks if the firmware is minimum version
*/
-bool ice_fw_supports_link_override(struct ice_hw *hw)
+static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
- if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
+ if (hw->api_maj_ver == maj) {
+ if (hw->api_min_ver > min)
+ return true;
+ if (hw->api_min_ver == min && hw->api_patch >= patch)
return true;
- if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
- hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
+ } else if (hw->api_maj_ver > maj) {
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_is_fw_min_ver
+ * @hw: pointer to the hardware structure
+ * @branch: branch version
+ * @maj: major version
+ * @min: minor version
+ * @patch: patch version
+ *
+ * Checks if the firmware is minimum version
+ */
+static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min,
+ u8 patch)
+{
+ if (hw->fw_branch == branch) {
+ if (hw->fw_maj_ver > maj)
return true;
- } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
+ if (hw->fw_maj_ver == maj) {
+ if (hw->fw_min_ver > min)
+ return true;
+ if (hw->fw_min_ver == min && hw->fw_patch >= patch)
+ return true;
+ }
+ } else if (hw->fw_branch > branch) {
return true;
}
return false;
}
+/**
+ * ice_fw_supports_link_override
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the firmware supports link override
+ */
+bool ice_fw_supports_link_override(struct ice_hw *hw)
+{
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
+ ICE_FW_API_LINK_OVERRIDE_MIN,
+ ICE_FW_API_LINK_OVERRIDE_PATCH);
+}
+
/**
* ice_get_link_default_override
* @ldo: pointer to the link default override struct
@@ -5897,19 +5951,12 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
*/
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
- if (hw->mac_type != ICE_MAC_E810)
+ if (hw->mac_type != ICE_MAC_E810 && hw->mac_type != ICE_MAC_GENERIC)
return false;
- if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
- return true;
- if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
- hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
- return true;
- } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
- return true;
- }
- return false;
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
+ ICE_FW_API_LLDP_FLTR_MIN,
+ ICE_FW_API_LLDP_FLTR_PATCH);
}
/**
@@ -5938,6 +5985,19 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
+/**
+ * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
+ * @hw: pointer to HW struct
+ */
+enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_execute_pending_lldp_mib);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
/**
* ice_fw_supports_report_dflt_cfg
* @hw: pointer to the hardware structure
@@ -5946,16 +6006,23 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
*/
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
- if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
- return true;
- if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
- hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
- return true;
- } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
- return true;
- }
- return false;
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
+ ICE_FW_API_REPORT_DFLT_CFG_MIN,
+ ICE_FW_API_REPORT_DFLT_CFG_PATCH);
+}
+
+/**
+ * ice_fw_supports_fec_dis_auto
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the firmware supports FEC disable in Auto FEC mode
+ */
+bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
+{
+ return ice_is_fw_min_ver(hw, ICE_FW_FEC_DIS_AUTO_BRANCH,
+ ICE_FW_FEC_DIS_AUTO_MAJ,
+ ICE_FW_FEC_DIS_AUTO_MIN,
+ ICE_FW_FEC_DIS_AUTO_PATCH);
}
/**
* ice_is_fw_auto_drop_supported
diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index b15cf240f9..ac13a979b1 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -167,6 +167,7 @@ enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
bool ice_fw_supports_link_override(struct ice_hw *hw);
+bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw);
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
@@ -282,6 +283,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
+enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw);
enum ice_status
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index cdfef47e94..af56849482 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -180,7 +180,8 @@ enum ice_fec_mode {
ICE_FEC_NONE = 0,
ICE_FEC_RS,
ICE_FEC_BASER,
- ICE_FEC_AUTO
+ ICE_FEC_AUTO,
+ ICE_FEC_DIS_AUTO
};
struct ice_phy_cache_mode_data {
@@ -1514,9 +1515,14 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
+
+/* FW version for FEC disable in Auto FEC mode */
+#define ICE_FW_FEC_DIS_AUTO_BRANCH 1
+#define ICE_FW_FEC_DIS_AUTO_MAJ 7
+#define ICE_FW_FEC_DIS_AUTO_MIN 0
+#define ICE_FW_FEC_DIS_AUTO_PATCH 5
+
/* AQ API version for FW auto drop reports */
#define ICE_FW_API_AUTO_DROP_MAJ 1
#define ICE_FW_API_AUTO_DROP_MIN 4
-
-
#endif /* _ICE_TYPE_H_ */
--
2.31.1
* [PATCH v2 51/70] net/ice/base: update PHY type high max index
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (49 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 50/70] net/ice/base: add support for Auto FEC with FEC disabled Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 52/70] net/ice/base: clean the main timer command register Qi Zhang
` (19 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Paul Greenwalt
ICE_PHY_TYPE_HIGH_MAX_INDEX should be the maximum index value and not
the number of ICE_PHY_TYPE_HIGH entries.
Signed-off-by: Paul Greenwalt <paul.greenwalt@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_adminq_cmd.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
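A short illustration of why the value must be 4 rather than 5 (the loop is a sketch, not driver code): the ICE_PHY_TYPE_HIGH_* definitions occupy bits 0 through 4, so a *_MAX_INDEX used as an inclusive bound must name the highest bit, not the number of bits.
u64 phy_type_high;	/* assumed to hold the PHY type high word from get-PHY-caps */
u8 i;

for (i = 0; i <= ICE_PHY_TYPE_HIGH_MAX_INDEX; i++)	/* bits 0..4 inclusive */
	if (phy_type_high & BIT_ULL(i))
		handle_phy_type_high(i);	/* hypothetical per-type handler */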
diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index dc72d70dfe..e1a6847157 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -1398,7 +1398,7 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
-#define ICE_PHY_TYPE_HIGH_MAX_INDEX 5
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 4
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
--
2.31.1
* [PATCH v2 52/70] net/ice/base: clean the main timer command register
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (50 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 51/70] net/ice/base: update PHY type high max index Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 53/70] net/ice/base: add support for custom WPC and LGB NICs Qi Zhang
` (18 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Sergey Temerkhanov
Clean the main timer command register after use to avoid residual
command execution, such as an unintended re-initialization of the main
timer.
Signed-off-by: Sergey Temerkhanov <sergey.temerkhanov@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_ptp_hw.c | 4 ++++
1 file changed, 4 insertions(+)
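A hedged sketch of the sequence this enforces (ice_ptp_src_cmd() and ICE_PTP_INIT_TIME are assumed names for the existing command-staging helper and one of its commands; the other two calls appear in the diff below): once the SYNC has fired, nothing should stay staged in the main timer command register.
ice_ptp_src_cmd(hw, ICE_PTP_INIT_TIME);	/* stage a command (assumed helper/value) */
ice_ptp_exec_tmr_cmd(hw);		/* SYNC executes whatever is staged */
ice_ptp_clean_cmd(hw);			/* clear it so a later SYNC cannot replay it */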
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index dfb9d08224..f5ebf5f328 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -3751,6 +3751,7 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
/* Issue the sync to start the ICE_PTP_READ_TIME capture */
ice_ptp_exec_tmr_cmd(hw);
+ ice_ptp_clean_cmd(hw);
/* Read the captured PHC time from the shadow time registers */
zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
@@ -3825,6 +3826,7 @@ static enum ice_status ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
/* Issue the sync to activate the time adjustment */
ice_ptp_exec_tmr_cmd(hw);
+ ice_ptp_clean_cmd(hw);
/* Re-capture the timer values to flush the command registers and
* verify that the time was properly adjusted.
@@ -3920,6 +3922,7 @@ ice_start_phy_timer_e822(struct ice_hw *hw, u8 port, bool bypass)
u64 incval;
u8 tmr_idx;
+ ice_ptp_clean_cmd(hw);
tmr_idx = ice_get_ptp_src_clock_index(hw);
status = ice_stop_phy_timer_e822(hw, port, false);
@@ -4913,6 +4916,7 @@ ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd, bool lock_sbq)
* commands synchronously
*/
ice_ptp_exec_tmr_cmd(hw);
+ ice_ptp_clean_cmd(hw);
return ICE_SUCCESS;
}
--
2.31.1
* [PATCH v2 53/70] net/ice/base: add support for custom WPC and LGB NICs
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (51 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 52/70] net/ice/base: clean the main timer command register Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 54/70] net/ice/base: add generic MAC with 3K signature segment Qi Zhang
` (17 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Michal Michalik
There are a few custom Westport Channel (WPC) and Logan Beach (LGB)
network interface cards (NICs); add their subdevice IDs so that these
cards can be distinguished.
Signed-off-by: Michal Michalik <michal.michalik@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_common.c | 15 ++++++++++++---
drivers/net/ice/base/ice_devids.h | 4 ++++
2 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index 9a41f36fed..c3024dd0b7 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -216,13 +216,22 @@ bool ice_is_e810t(struct ice_hw *hw)
{
switch (hw->device_id) {
case ICE_DEV_ID_E810C_SFP:
- if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
- hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
+ switch (hw->subsystem_device_id) {
+ case ICE_SUBDEV_ID_E810T:
+ case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T3:
+ case ICE_SUBDEV_ID_E810T4:
+ case ICE_SUBDEV_ID_E810T5:
return true;
+ }
break;
case ICE_DEV_ID_E810C_QSFP:
- if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
+ switch (hw->subsystem_device_id) {
+ case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T5:
+ case ICE_SUBDEV_ID_E810T6:
return true;
+ }
break;
default:
break;
diff --git a/drivers/net/ice/base/ice_devids.h b/drivers/net/ice/base/ice_devids.h
index 96dbb92e0a..937111844d 100644
--- a/drivers/net/ice/base/ice_devids.h
+++ b/drivers/net/ice/base/ice_devids.h
@@ -23,6 +23,10 @@
#define ICE_DEV_ID_E810C_SFP 0x1593
#define ICE_SUBDEV_ID_E810T 0x000E
#define ICE_SUBDEV_ID_E810T2 0x000F
+#define ICE_SUBDEV_ID_E810T3 0x02E9
+#define ICE_SUBDEV_ID_E810T4 0x02EA
+#define ICE_SUBDEV_ID_E810T5 0x0010
+#define ICE_SUBDEV_ID_E810T6 0x0012
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
--
2.31.1
* [PATCH v2 54/70] net/ice/base: add generic MAC with 3K signature segment
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (52 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 53/70] net/ice/base: add support for custom WPC and LGB NICs Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 55/70] net/ice/base: enable RSS support for L2TPv2 session ID Qi Zhang
` (16 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Grzegorz Nitka
Define a new type ID, ICE_MAC_GENERIC_3K, in the ice_mac_type enum to
distinguish devices which use the RSA-3K/SHA-384 segment signature type.
Use the 3K signing type for the E824S device.
Signed-off-by: Grzegorz Nitka <grzegorz.nitka@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_common.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index c3024dd0b7..44592d20bf 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -192,7 +192,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
*/
bool ice_is_generic_mac(struct ice_hw *hw)
{
- return hw->mac_type == ICE_MAC_GENERIC;
+ return (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->mac_type == ICE_MAC_GENERIC_3K);
}
/**
--
2.31.1
* [PATCH v2 55/70] net/ice/base: enable RSS support for L2TPv2 session ID
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (53 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 54/70] net/ice/base: add generic MAC with 3K signature segment Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 56/70] net/ice/base: enable FDIR support for L2TPv2 Qi Zhang
` (15 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Jie Wang
Add L2TPv2 session ID field support for RSS.
Enable L2TPv2 non-tunneled packet types for UDP protocol header
bitmaps.
Signed-off-by: Jie Wang <jie1x.wang@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_flow.c | 12 ++++++++++++
drivers/net/ice/base/ice_flow.h | 14 ++++++++++++++
2 files changed, 26 insertions(+)
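For context on the two offsets used in the field table below (12 for the session ID without the optional Length field, 14 with it), here is a sketch of the L2TPv2 header as it sits behind UDP; the assumption, based on the other ICE_PROT_UDP_OF fields, is that extraction offsets are counted from the start of the UDP header.
struct l2tpv2_hdr_sketch {	/* illustrative layout, not a driver struct */
	/* preceded by the 8-byte UDP header:		offsets 0..7	*/
	__be16 flags_version;	/*			offset 8	*/
	/* __be16 length;	only when the L bit is set (offset 10)	*/
	__be16 tunnel_id;	/*	offset 10 (12 with Length)	*/
	__be16 session_id;	/*	offset 12 (14 with Length)	*/
	/* optional Ns/Nr and Offset fields follow when S/O bits are set */
};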
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
index bdc51ca9d2..182fac08a9 100644
--- a/drivers/net/ice/base/ice_flow.c
+++ b/drivers/net/ice/base/ice_flow.c
@@ -38,6 +38,8 @@
#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
#define ICE_FLOW_FLD_SZ_VXLAN_VNI 4
#define ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID 2
+#define ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID 2
+#define ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID 2
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
@@ -229,6 +231,14 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
/* ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP_ECPRI_TP0, 12,
ICE_FLOW_FLD_SZ_ECPRI_TP0_PC_ID),
+ /* L2TPV2 */
+ /* ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 12,
+ ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID),
+ /* L2TPV2_LEN */
+ /* ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 14,
+ ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID),
};
/* Bitmaps indicating relevant packet types for a particular protocol header
@@ -1492,6 +1502,8 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
case ICE_FLOW_FIELD_IDX_GTPU_UP_QFI:
case ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI:
+ case ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID:
+ case ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID:
/* GTP is accessed through UDP OF protocol */
prot_id = ICE_PROT_UDP_OF;
break;
diff --git a/drivers/net/ice/base/ice_flow.h b/drivers/net/ice/base/ice_flow.h
index f941ce4333..5729392362 100644
--- a/drivers/net/ice/base/ice_flow.h
+++ b/drivers/net/ice/base/ice_flow.h
@@ -149,6 +149,16 @@
#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
+#define ICE_FLOW_HASH_L2TPV2_SESS_ID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID))
+#define ICE_FLOW_HASH_L2TPV2_SESS_ID_ETH \
+ (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_SESS_ID)
+
+#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID))
+#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID_ETH \
+ (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID)
+
#define ICE_FLOW_FIELD_IPV4_SRC_OFFSET 12
#define ICE_FLOW_FIELD_IPV4_DST_OFFSET 16
#define ICE_FLOW_FIELD_IPV6_SRC_OFFSET 8
@@ -297,6 +307,10 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_ECPRI_TP0_PC_ID,
/* UDP_ECPRI_TP0 */
ICE_FLOW_FIELD_IDX_UDP_ECPRI_TP0_PC_ID,
+ /* L2TPV2 SESSION ID*/
+ ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID,
+ /* L2TPV2_LEN SESSION ID */
+ ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID,
/* The total number of enums must not exceed 64 */
ICE_FLOW_FIELD_IDX_MAX
};
--
2.31.1
* [PATCH v2 56/70] net/ice/base: enable FDIR support for L2TPv2
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (54 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 55/70] net/ice/base: enable RSS support for L2TPv2 session ID Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 57/70] net/ice/base: add GRE Tap tunnel type Qi Zhang
` (14 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Jie Wang
Add L2TPv2 (including PPP over L2TPv2) support for FDIR, and add FDIR
support for PPPoL2TPv2oUDP with inner IPV4/IPV6/UDP/TCP.
The supported L2TPv2 packets are defined as below:
ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_CONTROL
ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2
ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP
ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4
ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_UDP
ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_TCP
ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6
ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_UDP
ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_TCP
ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_CONTROL
ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2
ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP
ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4
ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_UDP
ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_TCP
ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6
ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_UDP
ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_TCP
Signed-off-by: Jie Wang <jie1x.wang@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_fdir.c | 711 +++++++++++++++++++++++++++++++-
drivers/net/ice/base/ice_fdir.h | 19 +
drivers/net/ice/base/ice_type.h | 27 ++
3 files changed, 755 insertions(+), 2 deletions(-)
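The two new offsets in ice_fdir.h and the rewind at the top of ice_fdir_gen_l2tpv2_pkt() below follow from plain header arithmetic; a worked breakdown (header sizes are the usual fixed lengths, the grouping is mine):
/* Bytes from the start of the frame to the end of the default PPPoL2TPv2
 * header in the training-packet templates:
 *
 *   ICE_FDIR_IPV4_L2TPV2_PPP_PKT_OFF = 14 (Ethernet) + 20 (IPv4) + 8 (UDP)
 *                                    +  6 (minimal L2TPv2) + 4 (PPP) = 52
 *   ICE_FDIR_IPV6_L2TPV2_PPP_PKT_OFF = 14 (Ethernet) + 40 (IPv6) + 8 (UDP)
 *                                    +  6 (minimal L2TPv2) + 4 (PPP) = 72
 *
 * ice_fdir_gen_l2tpv2_pkt() rewinds by the same 6 + 4 = 10 bytes
 * (pos = offset - ICE_L2TPV2_PKT_LENGTH - ICE_PPP_PKT_LENGTH) so it can
 * re-emit the L2TPv2 header with whichever optional fields (Length, Ns/Nr,
 * Offset) the filter's flags_version selects, then append PPP and, for
 * tunnel ptypes, the inner packet.
 */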
diff --git a/drivers/net/ice/base/ice_fdir.c b/drivers/net/ice/base/ice_fdir.c
index 6bbab0c843..a554379075 100644
--- a/drivers/net/ice/base/ice_fdir.c
+++ b/drivers/net/ice/base/ice_fdir.c
@@ -1827,6 +1827,289 @@ static const u8 ice_fdir_tcp6_gtpu4_eh_up_gre6_pkt[] = {
0x00, 0x00, 0x00, 0x00,
};
+/* IPV4 L2TPV2 control */
+static const u8 ice_fdir_ipv4_l2tpv2_ctrl_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x28, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11,
+ 0x7c, 0xc2, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00,
+ 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x14,
+ 0x2c, 0x6b, 0xc8, 0x02, 0x00, 0x0c, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/* IPV4 L2TPV2 */
+static const u8 ice_fdir_ipv4_l2tpv2_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x28, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11,
+ 0x7c, 0xc2, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00,
+ 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x14,
+ 0x2c, 0x6b, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+/* IPV4 PPPOL2TPV2 */
+static const u8 ice_fdir_ipv4_l2tpv2_ppp_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x26, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11,
+ 0x7c, 0xc4, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00,
+ 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x12,
+ 0xf5, 0x77, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0x00, 0x00, 0x00, 0x00,
+};
+
+/* IPV4 PPPOL2TPV2 IPV4 */
+static const u8 ice_fdir_ipv4_l2tpv2_ppp4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x3a, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11,
+ 0x7c, 0xb0, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00,
+ 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x26,
+ 0xf5, 0x2e, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0x00, 0x21, 0x45, 0x00, 0x00, 0x14,
+ 0x00, 0x01, 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe7,
+ 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01,
+ 0x00, 0x00,
+};
+
+/* IPV4 PPPOL2TPV2 IPV4 UDP */
+static const u8 ice_fdir_udp4_l2tpv2_ppp4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x42, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11,
+ 0x7c, 0xa8, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00,
+ 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x2e,
+ 0xf3, 0x3a, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0x00, 0x21, 0x45, 0x00, 0x00, 0x1c,
+ 0x00, 0x01, 0x00, 0x00, 0x40, 0x11, 0x7c, 0xce,
+ 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01,
+ 0x00, 0x35, 0x00, 0x35, 0x00, 0x08, 0x01, 0x72,
+ 0x00, 0x00,
+};
+
+/* IPV4 PPPOL2TPV2 IPV4 TCP */
+static const u8 ice_fdir_tcp4_l2tpv2_ppp4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x4e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11,
+ 0x7c, 0x9c, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00,
+ 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x3a,
+ 0xf3, 0x23, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0x00, 0x21, 0x45, 0x00, 0x00, 0x28,
+ 0x00, 0x01, 0x00, 0x00, 0x40, 0x06, 0x7c, 0xcd,
+ 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01,
+ 0x00, 0x14, 0x00, 0x50, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, 0x20, 0x00,
+ 0x91, 0x7c, 0x00, 0x00, 0x00, 0x00,
+};
+
+/* IPV4 PPPOL2TPV2 IPV6 */
+static const u8 ice_fdir_ipv6_l2tpv2_ppp4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x4e, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11,
+ 0x7c, 0x9c, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00,
+ 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x3a,
+ 0x59, 0x8e, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0x00, 0x57, 0x60, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x3b, 0x40, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+};
+
+/* IPV4 PPPOL2TPV2 IPV6 UDP */
+static const u8 ice_fdir_udp6_l2tpv2_ppp4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x56, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11,
+ 0x7c, 0x94, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00,
+ 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x42,
+ 0x83, 0x91, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0x00, 0x57, 0x60, 0x00, 0x00, 0x00,
+ 0x00, 0x08, 0x11, 0x40, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x35, 0x00, 0x35,
+ 0x00, 0x08, 0xff, 0x72, 0x00, 0x00,
+};
+
+/* IPV4 PPPOL2TPV2 IPV6 TCP */
+static const u8 ice_fdir_tcp6_l2tpv2_ppp4_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
+ 0x00, 0x62, 0x00, 0x01, 0x00, 0x00, 0x40, 0x11,
+ 0x7c, 0x88, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00,
+ 0x00, 0x01, 0x06, 0xa5, 0x06, 0xa5, 0x00, 0x4e,
+ 0x8e, 0x6e, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0x00, 0x57, 0x60, 0x00, 0x00, 0x00,
+ 0x00, 0x14, 0x06, 0x40, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x14, 0x00, 0x50,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x02, 0x20, 0x00, 0x8f, 0x7d, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+/* IPV6 L2TPV2 control */
+static const u8 ice_fdir_ipv6_l2tpv2_ctrl_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5,
+ 0x06, 0xa5, 0x00, 0x14, 0x2a, 0x6c, 0xc8, 0x02,
+ 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+/* IPV6 L2TPV2 */
+static const u8 ice_fdir_ipv6_l2tpv2_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x14, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5,
+ 0x06, 0xa5, 0x00, 0x14, 0x2a, 0x6c, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+/* IPV6 PPPOL2TPV2 */
+static const u8 ice_fdir_ipv6_l2tpv2_ppp_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x12, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5,
+ 0x06, 0xa5, 0x00, 0x12, 0xf3, 0x78, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x03, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+/* IPV6 PPPOL2TPV2 IPV4 */
+static const u8 ice_fdir_ipv4_l2tpv2_ppp6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x26, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5,
+ 0x06, 0xa5, 0x00, 0x26, 0xf3, 0x2f, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x03, 0x00, 0x21,
+ 0x45, 0x00, 0x00, 0x14, 0x00, 0x01, 0x00, 0x00,
+ 0x40, 0x00, 0x7c, 0xe7, 0x7f, 0x00, 0x00, 0x01,
+ 0x7f, 0x00, 0x00, 0x01, 0x00, 0x00,
+};
+
+/* IPV6 PPPOL2TPV2 IPV4 UDP */
+static const u8 ice_fdir_udp4_l2tpv2_ppp6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x2e, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5,
+ 0x06, 0xa5, 0x00, 0x2e, 0xf1, 0x3b, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x03, 0x00, 0x21,
+ 0x45, 0x00, 0x00, 0x1c, 0x00, 0x01, 0x00, 0x00,
+ 0x40, 0x11, 0x7c, 0xce, 0x7f, 0x00, 0x00, 0x01,
+ 0x7f, 0x00, 0x00, 0x01, 0x00, 0x35, 0x00, 0x35,
+ 0x00, 0x08, 0x01, 0x72, 0x00, 0x00,
+};
+
+/* IPV6 PPPOL2TPV2 IPV4 TCP */
+static const u8 ice_fdir_tcp4_l2tpv2_ppp6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x3a, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5,
+ 0x06, 0xa5, 0x00, 0x3a, 0xf1, 0x24, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x03, 0x00, 0x21,
+ 0x45, 0x00, 0x00, 0x28, 0x00, 0x01, 0x00, 0x00,
+ 0x40, 0x06, 0x7c, 0xcd, 0x7f, 0x00, 0x00, 0x01,
+ 0x7f, 0x00, 0x00, 0x01, 0x00, 0x14, 0x00, 0x50,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x02, 0x20, 0x00, 0x91, 0x7c, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+/* IPV6 PPPOL2TPV2 IPV6 */
+static const u8 ice_fdir_ipv6_l2tpv2_ppp6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x3a, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5,
+ 0x06, 0xa5, 0x00, 0x3a, 0x57, 0x8f, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x03, 0x00, 0x57,
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00,
+};
+
+/* IPV6 PPPOL2TPV2 IPV6 UDP */
+static const u8 ice_fdir_udp6_l2tpv2_ppp6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x42, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5,
+ 0x06, 0xa5, 0x00, 0x42, 0x81, 0x92, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x03, 0x00, 0x57,
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x08, 0x11, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x35, 0x00, 0x35, 0x00, 0x08, 0xff, 0x72,
+ 0x00, 0x00,
+};
+
+/* IPV6 PPPOL2TPV2 IPV6 TCP */
+static const u8 ice_fdir_tcp6_l2tpv2_ppp6_pkt[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x86, 0xdd, 0x60, 0x00,
+ 0x00, 0x00, 0x00, 0x4e, 0x11, 0x40, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0xa5,
+ 0x06, 0xa5, 0x00, 0x4e, 0x8c, 0x6f, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x03, 0x00, 0x57,
+ 0x60, 0x00, 0x00, 0x00, 0x00, 0x14, 0x06, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x14, 0x00, 0x50, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, 0x20, 0x00,
+ 0x8f, 0x7d, 0x00, 0x00, 0x00, 0x00,
+};
+
static const u8 ice_fdir_tcpv6_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
@@ -2912,6 +3195,142 @@ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = {
sizeof(ice_fdir_tcp6_gtpu4_eh_up_gre6_pkt),
ice_fdir_tcp6_gtpu4_eh_up_gre6_pkt,
},
+ /* IPV4 L2TPV2 CONTROL */
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_CONTROL,
+ sizeof(ice_fdir_ipv4_l2tpv2_ctrl_pkt),
+ ice_fdir_ipv4_l2tpv2_ctrl_pkt,
+ sizeof(ice_fdir_ipv4_l2tpv2_ctrl_pkt),
+ ice_fdir_ipv4_l2tpv2_ctrl_pkt,
+ },
+ /* IPV4 L2TPV2 */
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2,
+ sizeof(ice_fdir_ipv4_l2tpv2_pkt),
+ ice_fdir_ipv4_l2tpv2_pkt,
+ sizeof(ice_fdir_ipv4_l2tpv2_pkt),
+ ice_fdir_ipv4_l2tpv2_pkt,
+ },
+ /* IPV4 L2TPV2 PPP */
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP,
+ sizeof(ice_fdir_ipv4_l2tpv2_ppp_pkt),
+ ice_fdir_ipv4_l2tpv2_ppp_pkt,
+ sizeof(ice_fdir_ipv4_l2tpv2_ppp_pkt),
+ ice_fdir_ipv4_l2tpv2_ppp_pkt,
+ },
+ /* IPV4 L2TPV2 PPP IPV4 */
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4,
+ sizeof(ice_fdir_ipv4_l2tpv2_ppp4_pkt),
+ ice_fdir_ipv4_l2tpv2_ppp4_pkt,
+ sizeof(ice_fdir_ipv4_l2tpv2_ppp4_pkt),
+ ice_fdir_ipv4_l2tpv2_ppp4_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_UDP,
+ sizeof(ice_fdir_udp4_l2tpv2_ppp4_pkt),
+ ice_fdir_udp4_l2tpv2_ppp4_pkt,
+ sizeof(ice_fdir_udp4_l2tpv2_ppp4_pkt),
+ ice_fdir_udp4_l2tpv2_ppp4_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_TCP,
+ sizeof(ice_fdir_tcp4_l2tpv2_ppp4_pkt),
+ ice_fdir_tcp4_l2tpv2_ppp4_pkt,
+ sizeof(ice_fdir_tcp4_l2tpv2_ppp4_pkt),
+ ice_fdir_tcp4_l2tpv2_ppp4_pkt,
+ },
+ /* IPV4 L2TPV2 PPP IPV6 */
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6,
+ sizeof(ice_fdir_ipv6_l2tpv2_ppp4_pkt),
+ ice_fdir_ipv6_l2tpv2_ppp4_pkt,
+ sizeof(ice_fdir_ipv6_l2tpv2_ppp4_pkt),
+ ice_fdir_ipv6_l2tpv2_ppp4_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_UDP,
+ sizeof(ice_fdir_udp6_l2tpv2_ppp4_pkt),
+ ice_fdir_udp6_l2tpv2_ppp4_pkt,
+ sizeof(ice_fdir_udp6_l2tpv2_ppp4_pkt),
+ ice_fdir_udp6_l2tpv2_ppp4_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_TCP,
+ sizeof(ice_fdir_tcp6_l2tpv2_ppp4_pkt),
+ ice_fdir_tcp6_l2tpv2_ppp4_pkt,
+ sizeof(ice_fdir_tcp6_l2tpv2_ppp4_pkt),
+ ice_fdir_tcp6_l2tpv2_ppp4_pkt,
+ },
+ /* IPV6 L2TPV2 CONTROL */
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_CONTROL,
+ sizeof(ice_fdir_ipv6_l2tpv2_ctrl_pkt),
+ ice_fdir_ipv6_l2tpv2_ctrl_pkt,
+ sizeof(ice_fdir_ipv6_l2tpv2_ctrl_pkt),
+ ice_fdir_ipv6_l2tpv2_ctrl_pkt,
+ },
+ /* IPV6 L2TPV2 */
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2,
+ sizeof(ice_fdir_ipv6_l2tpv2_pkt),
+ ice_fdir_ipv6_l2tpv2_pkt,
+ sizeof(ice_fdir_ipv6_l2tpv2_pkt),
+ ice_fdir_ipv6_l2tpv2_pkt,
+ },
+ /* IPV6 L2TPV2 PPP */
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP,
+ sizeof(ice_fdir_ipv6_l2tpv2_ppp_pkt),
+ ice_fdir_ipv6_l2tpv2_ppp_pkt,
+ sizeof(ice_fdir_ipv6_l2tpv2_ppp_pkt),
+ ice_fdir_ipv6_l2tpv2_ppp_pkt,
+ },
+ /* IPV6 L2TPV2 PPP IPV4 */
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4,
+ sizeof(ice_fdir_ipv4_l2tpv2_ppp6_pkt),
+ ice_fdir_ipv4_l2tpv2_ppp6_pkt,
+ sizeof(ice_fdir_ipv4_l2tpv2_ppp6_pkt),
+ ice_fdir_ipv4_l2tpv2_ppp6_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_UDP,
+ sizeof(ice_fdir_udp4_l2tpv2_ppp6_pkt),
+ ice_fdir_udp4_l2tpv2_ppp6_pkt,
+ sizeof(ice_fdir_udp4_l2tpv2_ppp6_pkt),
+ ice_fdir_udp4_l2tpv2_ppp6_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_TCP,
+ sizeof(ice_fdir_tcp4_l2tpv2_ppp6_pkt),
+ ice_fdir_tcp4_l2tpv2_ppp6_pkt,
+ sizeof(ice_fdir_tcp4_l2tpv2_ppp6_pkt),
+ ice_fdir_tcp4_l2tpv2_ppp6_pkt,
+ },
+ /* IPV6 L2TPV2 PPP IPV6 */
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6,
+ sizeof(ice_fdir_ipv6_l2tpv2_ppp6_pkt),
+ ice_fdir_ipv6_l2tpv2_ppp6_pkt,
+ sizeof(ice_fdir_ipv6_l2tpv2_ppp6_pkt),
+ ice_fdir_ipv6_l2tpv2_ppp6_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_UDP,
+ sizeof(ice_fdir_udp6_l2tpv2_ppp6_pkt),
+ ice_fdir_udp6_l2tpv2_ppp6_pkt,
+ sizeof(ice_fdir_udp6_l2tpv2_ppp6_pkt),
+ ice_fdir_udp6_l2tpv2_ppp6_pkt,
+ },
+ {
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_TCP,
+ sizeof(ice_fdir_tcp6_l2tpv2_ppp6_pkt),
+ ice_fdir_tcp6_l2tpv2_ppp6_pkt,
+ sizeof(ice_fdir_tcp6_l2tpv2_ppp6_pkt),
+ ice_fdir_tcp6_l2tpv2_ppp6_pkt,
+ },
{
ICE_FLTR_PTYPE_NONF_IPV6_TCP,
sizeof(ice_fdir_tcpv6_pkt), ice_fdir_tcpv6_pkt,
@@ -3290,6 +3709,111 @@ ice_fdir_get_open_tunnel_port(struct ice_hw *hw, enum ice_fltr_ptype flow,
return ICE_SUCCESS;
}
+/**
+ * ice_fdir_gen_l2tpv2_pkt - generate L2TPv2 training packet
+ * @pkt: pointer to return filter packet
+ * @l2tpv2_data: pointer to ice_fdir_l2tpv2 data structure
+ * @idx: the matched packet index of FDIR training packet table
+ * @offset: position of end byte for PPPoL2TPv2 packet
+ * @tun: true implies generate a tunnel packet
+ */
+static u16
+ice_fdir_gen_l2tpv2_pkt(u8 *pkt, struct ice_fdir_l2tpv2 *l2tpv2_data,
+ u16 idx, u16 offset, bool tun)
+{
+ u16 flags_version;
+ u16 offset_size;
+ u16 pos;
+
+ /* get outer packet end pos, 10 = l2tpv2 default len 6 + ppp len 4 */
+ pos = offset - ICE_L2TPV2_PKT_LENGTH - ICE_PPP_PKT_LENGTH;
+
+ /* copy outer packet */
+ ice_memcpy(pkt, ice_fdir_pkt[idx].tun_pkt, pos, ICE_NONDMA_TO_NONDMA);
+
+ /* copy l2tpv2 packet common header */
+ ice_memcpy(pkt + pos, &l2tpv2_data->flags_version,
+ sizeof(l2tpv2_data->flags_version),
+ ICE_NONDMA_TO_NONDMA);
+ pos += sizeof(l2tpv2_data->flags_version);
+
+ flags_version = BE16_TO_CPU(l2tpv2_data->flags_version);
+ if (flags_version == 0) {
+ l2tpv2_data->flags_version = CPU_TO_BE16(ICE_L2TPV2_FLAGS_VER);
+ flags_version = ICE_L2TPV2_FLAGS_VER;
+ }
+
+ /* copy l2tpv2 length */
+ if (flags_version & ICE_L2TPV2_FLAGS_LEN) {
+ ice_memcpy(pkt + pos, &l2tpv2_data->length,
+ sizeof(l2tpv2_data->length),
+ ICE_NONDMA_TO_NONDMA);
+ pos += sizeof(l2tpv2_data->length);
+ }
+
+ /* copy l2tpv2 tunnel id */
+ ice_memcpy(pkt + pos, &l2tpv2_data->tunnel_id,
+ sizeof(l2tpv2_data->tunnel_id),
+ ICE_NONDMA_TO_NONDMA);
+ pos += sizeof(l2tpv2_data->tunnel_id);
+
+ /* copy l2tpv2 session id */
+ ice_memcpy(pkt + pos, &l2tpv2_data->session_id,
+ sizeof(l2tpv2_data->session_id),
+ ICE_NONDMA_TO_NONDMA);
+ pos += sizeof(l2tpv2_data->session_id);
+
+ /* copy l2tpv2 ns + nr */
+ if (flags_version & ICE_L2TPV2_FLAGS_SEQ) {
+ ice_memcpy(pkt + pos, &l2tpv2_data->ns,
+ sizeof(l2tpv2_data->ns),
+ ICE_NONDMA_TO_NONDMA);
+ pos += sizeof(l2tpv2_data->ns);
+
+ ice_memcpy(pkt + pos, &l2tpv2_data->nr,
+ sizeof(l2tpv2_data->nr),
+ ICE_NONDMA_TO_NONDMA);
+ pos += sizeof(l2tpv2_data->nr);
+ }
+
+ /* copy l2tpv2 offset size + offset padding */
+ if (flags_version & ICE_L2TPV2_FLAGS_OFF) {
+ ice_memcpy(pkt + pos, &l2tpv2_data->offset_size,
+ sizeof(l2tpv2_data->offset_size),
+ ICE_NONDMA_TO_NONDMA);
+ pos += sizeof(l2tpv2_data->offset_size);
+ /* insert 0 into offset padding */
+ offset_size = BE16_TO_CPU(l2tpv2_data->offset_size);
+ if (offset_size > ICE_FDIR_MAX_RAW_PKT_SIZE -
+ ice_fdir_pkt[idx].tun_pkt_len) {
+ offset_size = ICE_FDIR_MAX_RAW_PKT_SIZE -
+ ice_fdir_pkt[idx].tun_pkt_len;
+ }
+ ice_memset(pkt + pos, 0, offset_size, ICE_NONDMA_MEM);
+ pos += offset_size;
+ }
+
+ if (ice_fdir_pkt[idx].tun_pkt_len > offset) {
+ /* copy ppp packet */
+ ice_memcpy(pkt + pos,
+ ice_fdir_pkt[idx].tun_pkt + offset -
+ ICE_PPP_PKT_LENGTH,
+ ICE_PPP_PKT_LENGTH,
+ ICE_NONDMA_TO_NONDMA);
+ pos += ICE_PPP_PKT_LENGTH;
+
+ /* copy inner packets */
+ if (tun) {
+ ice_memcpy(pkt + pos,
+ ice_fdir_pkt[idx].tun_pkt + offset,
+ ice_fdir_pkt[idx].tun_pkt_len - offset,
+ ICE_NONDMA_TO_NONDMA);
+ }
+ }
+
+ return pos;
+}
+
/**
* ice_fdir_get_gen_prgm_pkt - generate a training packet
* @hw: pointer to the hardware structure
@@ -3306,6 +3830,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
u16 tnl_port;
u8 *loc;
u16 idx;
+ u16 flags_version;
+ u16 pos;
+ u16 offset;
if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
switch (input->ip.v4.proto) {
@@ -3346,9 +3873,29 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
break;
if (idx == ICE_FDIR_NUM_PKT)
return ICE_ERR_PARAM;
+
if (!tun) {
- ice_memcpy(pkt, ice_fdir_pkt[idx].pkt,
- ice_fdir_pkt[idx].pkt_len, ICE_NONDMA_TO_NONDMA);
+ switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_CONTROL:
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2:
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP:
+ offset = ICE_FDIR_IPV4_L2TPV2_PPP_PKT_OFF;
+ ice_fdir_gen_l2tpv2_pkt(pkt, &input->l2tpv2_data,
+ idx, offset, tun);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_CONTROL:
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2:
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP:
+ offset = ICE_FDIR_IPV6_L2TPV2_PPP_PKT_OFF;
+ ice_fdir_gen_l2tpv2_pkt(pkt, &input->l2tpv2_data,
+ idx, offset, tun);
+ break;
+ default:
+ ice_memcpy(pkt, ice_fdir_pkt[idx].pkt,
+ ice_fdir_pkt[idx].pkt_len,
+ ICE_NONDMA_TO_NONDMA);
+ break;
+ }
loc = pkt;
} else {
if (!ice_fdir_pkt[idx].tun_pkt)
@@ -3479,6 +4026,28 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
ICE_NONDMA_TO_NONDMA);
loc = &pkt[ICE_FDIR_V6_V4_GTPOGRE_EH_PKT_OFF];
break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4:
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6:
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_TCP:
+ offset = ICE_FDIR_IPV4_L2TPV2_PPP_PKT_OFF;
+ pos = ice_fdir_gen_l2tpv2_pkt(pkt, &input->l2tpv2_data,
+ idx, offset, tun);
+ loc = &pkt[pos];
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4:
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6:
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_TCP:
+ offset = ICE_FDIR_IPV6_L2TPV2_PPP_PKT_OFF;
+ pos = ice_fdir_gen_l2tpv2_pkt(pkt, &input->l2tpv2_data,
+ idx, offset, tun);
+ loc = &pkt[pos];
+ break;
default:
if (ice_fdir_get_open_tunnel_port(hw, flow, &tnl_port))
return ICE_ERR_DOES_NOT_EXIST;
@@ -4021,6 +4590,138 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
ice_pkt_insert_u8_tc(loc, ICE_IPV6_NO_MAC_TC_OFFSET,
input->ip.v6.tc);
break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_CONTROL:
+ ice_pkt_insert_mac_addr(loc, input->ext_data_outer.dst_mac);
+ ice_pkt_insert_mac_addr(loc + ETH_ALEN,
+ input->ext_data_outer.src_mac);
+ ice_pkt_insert_u16(loc, ICE_IPV4_L2TPV2_LEN_SESS_ID_OFFSET,
+ input->l2tpv2_data.session_id);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2:
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP:
+ ice_pkt_insert_mac_addr(loc, input->ext_data_outer.dst_mac);
+ ice_pkt_insert_mac_addr(loc + ETH_ALEN,
+ input->ext_data_outer.src_mac);
+ flags_version = BE16_TO_CPU(input->l2tpv2_data.flags_version);
+ if (flags_version & ICE_L2TPV2_FLAGS_LEN) {
+ ice_pkt_insert_u16(loc,
+ ICE_IPV4_L2TPV2_LEN_SESS_ID_OFFSET,
+ input->l2tpv2_data.session_id);
+ } else {
+ ice_pkt_insert_u16(loc,
+ ICE_IPV4_L2TPV2_SESS_ID_OFFSET,
+ input->l2tpv2_data.session_id);
+ }
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_CONTROL:
+ ice_pkt_insert_mac_addr(loc, input->ext_data_outer.dst_mac);
+ ice_pkt_insert_mac_addr(loc + ETH_ALEN,
+ input->ext_data_outer.src_mac);
+ ice_pkt_insert_u16(loc, ICE_IPV6_L2TPV2_LEN_SESS_ID_OFFSET,
+ input->l2tpv2_data.session_id);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2:
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP:
+ ice_pkt_insert_mac_addr(loc, input->ext_data_outer.dst_mac);
+ ice_pkt_insert_mac_addr(loc + ETH_ALEN,
+ input->ext_data_outer.src_mac);
+ flags_version = BE16_TO_CPU(input->l2tpv2_data.flags_version);
+ if (flags_version & ICE_L2TPV2_FLAGS_LEN) {
+ ice_pkt_insert_u16(loc,
+ ICE_IPV6_L2TPV2_LEN_SESS_ID_OFFSET,
+ input->l2tpv2_data.session_id);
+ } else {
+ ice_pkt_insert_u16(loc,
+ ICE_IPV6_L2TPV2_SESS_ID_OFFSET,
+ input->l2tpv2_data.session_id);
+ }
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4:
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4:
+ ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TOS_OFFSET,
+ input->ip.v4.tos);
+ ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TTL_OFFSET,
+ input->ip.v4.ttl);
+ ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_PROTO_OFFSET,
+ input->ip.v4.proto);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_UDP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u16(loc, ICE_UDP4_NO_MAC_DST_PORT_OFFSET,
+ input->ip.v4.src_port);
+ ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_UDP4_NO_MAC_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TOS_OFFSET,
+ input->ip.v4.tos);
+ ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TTL_OFFSET,
+ input->ip.v4.ttl);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_TCP:
+ ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_DST_ADDR_OFFSET,
+ input->ip.v4.src_ip);
+ ice_pkt_insert_u16(loc, ICE_TCP4_NO_MAC_DST_PORT_OFFSET,
+ input->ip.v4.src_port);
+ ice_pkt_insert_u32(loc, ICE_IPV4_NO_MAC_SRC_ADDR_OFFSET,
+ input->ip.v4.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_TCP4_NO_MAC_SRC_PORT_OFFSET,
+ input->ip.v4.dst_port);
+ ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TOS_OFFSET,
+ input->ip.v4.tos);
+ ice_pkt_insert_u8(loc, ICE_IPV4_NO_MAC_TTL_OFFSET,
+ input->ip.v4.ttl);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6:
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u8_tc(loc, ICE_IPV6_NO_MAC_TC_OFFSET,
+ input->ip.v6.tc);
+ ice_pkt_insert_u8(loc, ICE_IPV6_NO_MAC_HLIM_OFFSET,
+ input->ip.v6.hlim);
+ ice_pkt_insert_u8(loc, ICE_IPV6_NO_MAC_PROTO_OFFSET,
+ input->ip.v6.proto);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_UDP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_UDP6_NO_MAC_DST_PORT_OFFSET,
+ input->ip.v6.src_port);
+ ice_pkt_insert_u16(loc, ICE_UDP6_NO_MAC_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ ice_pkt_insert_u8_tc(loc, ICE_IPV6_NO_MAC_TC_OFFSET,
+ input->ip.v6.tc);
+ ice_pkt_insert_u8(loc, ICE_IPV6_NO_MAC_HLIM_OFFSET,
+ input->ip.v6.hlim);
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_TCP:
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_DST_ADDR_OFFSET,
+ input->ip.v6.src_ip);
+ ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_NO_MAC_SRC_ADDR_OFFSET,
+ input->ip.v6.dst_ip);
+ ice_pkt_insert_u16(loc, ICE_TCP6_NO_MAC_DST_PORT_OFFSET,
+ input->ip.v6.src_port);
+ ice_pkt_insert_u16(loc, ICE_TCP6_NO_MAC_SRC_PORT_OFFSET,
+ input->ip.v6.dst_port);
+ ice_pkt_insert_u8_tc(loc, ICE_IPV6_NO_MAC_TC_OFFSET,
+ input->ip.v6.tc);
+ ice_pkt_insert_u8(loc, ICE_IPV6_NO_MAC_HLIM_OFFSET,
+ input->ip.v6.hlim);
+ break;
case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
input->ip.v6.src_ip);
@@ -4252,6 +4953,12 @@ ice_fdir_comp_rules_extended(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b)
return false;
if (memcmp(&a->ecpri_mask, &b->ecpri_mask, sizeof(a->ecpri_mask)))
return false;
+ if (memcmp(&a->l2tpv2_data.session_id, &b->l2tpv2_data.session_id,
+ sizeof(a->l2tpv2_data.session_id)))
+ return false;
+ if (memcmp(&a->l2tpv2_mask.session_id, &b->l2tpv2_mask.session_id,
+ sizeof(a->l2tpv2_mask.session_id)))
+ return false;
return true;
}
diff --git a/drivers/net/ice/base/ice_fdir.h b/drivers/net/ice/base/ice_fdir.h
index d57b1daecd..ced880fff1 100644
--- a/drivers/net/ice/base/ice_fdir.h
+++ b/drivers/net/ice/base/ice_fdir.h
@@ -26,6 +26,8 @@
#define ICE_FDIR_V4_V6_GTPOGRE_EH_PKT_OFF 102
#define ICE_FDIR_V6_V4_GTPOGRE_EH_PKT_OFF 102
#define ICE_FDIR_V6_V6_GTPOGRE_EH_PKT_OFF 122
+#define ICE_FDIR_IPV4_L2TPV2_PPP_PKT_OFF 52
+#define ICE_FDIR_IPV6_L2TPV2_PPP_PKT_OFF 72
#define ICE_FDIR_TUN_PKT_OFF 50
#define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF)
@@ -96,6 +98,10 @@
#define ICE_IPV4_VXLAN_VNI_OFFSET 46
#define ICE_ECPRI_TP0_PC_ID_OFFSET 18
#define ICE_IPV4_UDP_ECPRI_TP0_PC_ID_OFFSET 46
+#define ICE_IPV4_L2TPV2_SESS_ID_OFFSET 46
+#define ICE_IPV6_L2TPV2_SESS_ID_OFFSET 66
+#define ICE_IPV4_L2TPV2_LEN_SESS_ID_OFFSET 48
+#define ICE_IPV6_L2TPV2_LEN_SESS_ID_OFFSET 68
#define ICE_FDIR_MAX_FLTRS 16384
@@ -222,6 +228,16 @@ struct ice_fdir_ecpri {
__be16 pc_id;
};
+struct ice_fdir_l2tpv2 {
+ __be16 flags_version;
+ __be16 length;
+ __be16 tunnel_id;
+ __be16 session_id;
+ __be16 ns;
+ __be16 nr;
+ __be16 offset_size;
+};
+
struct ice_fdir_extra {
u8 dst_mac[ETH_ALEN]; /* dest MAC address */
u8 src_mac[ETH_ALEN]; /* src MAC address */
@@ -261,6 +277,9 @@ struct ice_fdir_fltr {
struct ice_fdir_ecpri ecpri_data;
struct ice_fdir_ecpri ecpri_mask;
+ struct ice_fdir_l2tpv2 l2tpv2_data;
+ struct ice_fdir_l2tpv2 l2tpv2_mask;
+
struct ice_fdir_extra ext_data;
struct ice_fdir_extra ext_mask;
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index af56849482..6d0adf0dd1 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -282,6 +282,15 @@ struct ice_phy_info {
#define ICE_MAX_NUM_MIRROR_RULES 64
+#define ICE_L2TPV2_FLAGS_CTRL 0x8000
+#define ICE_L2TPV2_FLAGS_LEN 0x4000
+#define ICE_L2TPV2_FLAGS_SEQ 0x0800
+#define ICE_L2TPV2_FLAGS_OFF 0x0200
+#define ICE_L2TPV2_FLAGS_VER 0x0002
+
+#define ICE_L2TPV2_PKT_LENGTH 6
+#define ICE_PPP_PKT_LENGTH 4
+
/* protocol enumeration for filters */
enum ice_fltr_ptype {
/* NONE - used for undef/error */
@@ -479,6 +488,24 @@ enum ice_fltr_ptype {
ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP,
ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP,
ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER,
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_CONTROL,
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2,
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP,
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4,
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV4_TCP,
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6,
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV4_L2TPV2_PPP_IPV6_TCP,
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_CONTROL,
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2,
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP,
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4,
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV4_TCP,
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6,
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV6_L2TPV2_PPP_IPV6_TCP,
ICE_FLTR_PTYPE_MAX,
};
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 57/70] net/ice/base: add GRE Tap tunnel type
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (55 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 56/70] net/ice/base: enable FDIR support for L2TPv2 Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 58/70] net/ice/base: fix wrong inputset of GTPoGRE packet Qi Zhang
` (13 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Michal Swiatkowski
Added a new tunnel type (TNL_GRETAP) to support NVGRE tunnels.
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_flex_type.h | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/net/ice/base/ice_flex_type.h b/drivers/net/ice/base/ice_flex_type.h
index 2855d67831..070d2aeb1e 100644
--- a/drivers/net/ice/base/ice_flex_type.h
+++ b/drivers/net/ice/base/ice_flex_type.h
@@ -441,6 +441,7 @@ struct ice_prof_redir_section {
enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
+ TNL_GRETAP,
TNL_ECPRI,
TNL_GTP,
TNL_LAST = 0xFF,
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 58/70] net/ice/base: fix wrong inputset of GTPoGRE packet
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (56 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 57/70] net/ice/base: add GRE Tap tunnel type Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 59/70] net/ice/base: add unload flag for control queue shutdown Qi Zhang
` (12 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, stable, Kevin Liu
For GTPoGRE, the prot_id for the inner segment's protocol/TTL field
must refer to the second inner IP header: the packet carries three IP
headers (outer, under GRE, and under GTP-U), so for segment 1 the
extraction must use ICE_PROT_IPV4_IL_IL / ICE_PROT_IPV6_IL_IL instead
of the plain inner protocol IDs.
Fixes: 34a0e7c44f2b ("net/ice/base: improve flow director masking")
Cc: stable@dpdk.org
Signed-off-by: Kevin Liu <kevinx.liu@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_flow.c | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
index 182fac08a9..33e97ec333 100644
--- a/drivers/net/ice/base/ice_flow.c
+++ b/drivers/net/ice/base/ice_flow.c
@@ -1404,7 +1404,10 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
case ICE_FLOW_FIELD_IDX_IPV4_TTL:
case ICE_FLOW_FIELD_IDX_IPV4_PROT:
prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
-
+ if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
+ params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
+ seg == 1)
+ prot_id = ICE_PROT_IPV4_IL_IL;
/* TTL and PROT share the same extraction seq. entry.
* Each is considered a sibling to the other in terms of sharing
* the same extraction sequence entry.
@@ -1432,7 +1435,10 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
prot_id = seg == 0 ?
ICE_PROT_IPV6_NEXT_PROTO :
ICE_PROT_IPV6_IL;
-
+ if (params->prof->segs[0].hdrs & ICE_FLOW_SEG_HDR_GRE &&
+ params->prof->segs[1].hdrs & ICE_FLOW_SEG_HDR_GTPU &&
+ seg == 1)
+ prot_id = ICE_PROT_IPV6_IL_IL;
/* TTL and PROT share the same extraction seq. entry.
* Each is considered a sibling to the other in terms of sharing
* the same extraction sequence entry.
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 59/70] net/ice/base: add unload flag for control queue shutdown
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (57 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 58/70] net/ice/base: fix wrong inputset of GTPoGRE packet Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 60/70] net/ice/base: update comment for overloaded GCO bit Qi Zhang
` (11 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Piotr Gardocki
The admin queue command for shutting down the AQ contains a flag that
indicates a driver unload. However, the driver always set this flag,
even for resets. This caused firmware to treat the driver as unloaded
once a PF reset was triggered on all ports of the device, and firmware
then restored the default configuration of some features, such as Tx
Balancing, which is not the expected behavior.
This patch adds a function parameter so callers can indicate whether
the driver is actually unloading.
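As an illustration only, a minimal sketch of how callers can use the
new parameter (the wrapper functions below are hypothetical; only
ice_shutdown_all_ctrlq() is part of this change):

/* Reset path: the queues will be re-initialized later, so do not tell
 * firmware that the driver unloaded.
 */
static void example_handle_pf_reset(struct ice_hw *hw)
{
	ice_shutdown_all_ctrlq(hw, false);
}

/* Unload path: firmware may restore defaults (e.g. Tx Balancing). */
static void example_driver_remove(struct ice_hw *hw)
{
	ice_shutdown_all_ctrlq(hw, true);
}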
Signed-off-by: Piotr Gardocki <piotrx.gardocki@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_common.h | 2 +-
drivers/net/ice/base/ice_controlq.c | 19 +++++++++++--------
drivers/net/ice/ice_ethdev.c | 2 +-
3 files changed, 13 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index ac13a979b1..9101ae11af 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -32,7 +32,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
-void ice_shutdown_all_ctrlq(struct ice_hw *hw);
+void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading);
void ice_destroy_all_ctrlq(struct ice_hw *hw);
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
diff --git a/drivers/net/ice/base/ice_controlq.c b/drivers/net/ice/base/ice_controlq.c
index d83d0d76d0..8a6311572c 100644
--- a/drivers/net/ice/base/ice_controlq.c
+++ b/drivers/net/ice/base/ice_controlq.c
@@ -661,10 +661,12 @@ static bool ice_is_sbq_supported(struct ice_hw *hw)
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
+ * @unloading: is the driver unloading itself
*
* NOTE: this function does not destroy the control queue locks.
*/
-static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
+ bool unloading)
{
struct ice_ctl_q_info *cq;
@@ -674,7 +676,7 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
case ICE_CTL_Q_ADMIN:
cq = &hw->adminq;
if (ice_check_sq_alive(hw, cq))
- ice_aq_q_shutdown(hw, true);
+ ice_aq_q_shutdown(hw, unloading);
break;
case ICE_CTL_Q_SB:
cq = &hw->sbq;
@@ -693,21 +695,22 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
/**
* ice_shutdown_all_ctrlq - shutdown routine for all control queues
* @hw: pointer to the hardware structure
+ * @unloading: is the driver unloading itself
*
* NOTE: this function does not destroy the control queue locks. The driver
* may call this at runtime to shutdown and later restart control queues, such
* as in response to a reset event.
*/
-void ice_shutdown_all_ctrlq(struct ice_hw *hw)
+void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Shutdown FW admin queue */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
/* Shutdown PHY Sideband */
if (ice_is_sbq_supported(hw))
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading);
/* Shutdown PF-VF Mailbox */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}
/**
@@ -741,7 +744,7 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
break;
ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
@@ -822,7 +825,7 @@ static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
/* shut down all the control queues first */
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, true);
ice_destroy_ctrlq_locks(&hw->adminq);
if (ice_is_sbq_supported(hw))
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 551be3566f..172eb2fbdb 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2521,7 +2521,7 @@ ice_dev_close(struct rte_eth_dev *dev)
ice_free_hw_tbls(hw);
rte_free(hw->port_info);
hw->port_info = NULL;
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, true);
rte_free(pf->proto_xtr);
pf->proto_xtr = NULL;
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 60/70] net/ice/base: update comment for overloaded GCO bit
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (58 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 59/70] net/ice/base: add unload flag for control queue shutdown Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 61/70] net/ice/base: complete pending LLDP MIB Qi Zhang
` (10 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Alice Michael
The overloaded bit is bit 11 of status_error1 in the flex descriptor;
update the comment to reflect the correct bit.
Signed-off-by: Alice Michael <alice.michael@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_lan_tx_rx.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ice/base/ice_lan_tx_rx.h b/drivers/net/ice/base/ice_lan_tx_rx.h
index 2b6f039dcb..ba1b9a66d8 100644
--- a/drivers/net/ice/base/ice_lan_tx_rx.h
+++ b/drivers/net/ice/base/ice_lan_tx_rx.h
@@ -544,7 +544,7 @@ struct ice_32b_rx_flex_desc_nic_raw_csum {
__le32 rss_hash;
/* Qword 2 */
- __le16 status_error1; /* bit 6 Raw CSUM present */
+ __le16 status_error1; /* bit 11 Raw CSUM present */
u8 flexi_flags2;
u8 ts_low;
__le16 l2tag2_1st;
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 61/70] net/ice/base: complete pending LLDP MIB
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (59 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 60/70] net/ice/base: update comment for overloaded GCO bit Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 62/70] net/ice/base: add function to parse DCBX config Qi Zhang
` (9 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang
Cc: dev, Qi Zhang, Tsotne Chakhvadze, Karen Sornek, Anatolii Gerasymenko
Completed the ice_aqc_lldp_get_mib structure (DCBX mode bits and the
MIB change state field) and added the 'Pending Event Enable' bit to
the Set LLDP MIB Change command.
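For illustration only, a sketch (not part of this patch) of how a
consumer of the MIB Change Event could decode the new fields; the
helper name is hypothetical, while the macros and fields come from the
diff below:

/* Sketch: report whether a received MIB change is still pending */
static bool example_mib_change_is_pending(struct ice_aqc_lldp_get_mib *mib)
{
	u8 dcbx_mode = (mib->type & ICE_AQ_LLDP_DCBX_M) >>
		       ICE_AQ_LLDP_DCBX_S;

	return dcbx_mode != ICE_AQ_LLDP_DCBX_NA &&
	       (mib->state & ICE_AQ_LLDP_MIB_CHANGE_STATE_M) ==
	       ICE_AQ_LLDP_MIB_CHANGE_PENDING;
}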
Signed-off-by: Tsotne Chakhvadze <tsotne.chakhvadze@intel.com>
Signed-off-by: Karen Sornek <karen.sornek@intel.com>
Signed-off-by: Anatolii Gerasymenko <anatolii.gerasymenko@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_adminq_cmd.h | 20 ++++++++++++++++++--
drivers/net/ice/base/ice_dcb.c | 3 +++
2 files changed, 21 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index e1a6847157..dc3c3269d4 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -1998,14 +1998,25 @@ struct ice_aqc_lldp_get_mib {
#define ICE_AQ_LLDP_TX_ACTIVE 0
#define ICE_AQ_LLDP_TX_SUSPENDED 1
#define ICE_AQ_LLDP_TX_FLUSHED 3
+/* DCBX mode */
+#define ICE_AQ_LLDP_DCBX_S 6
+#define ICE_AQ_LLDP_DCBX_M (0x3 << ICE_AQ_LLDP_DCBX_S)
+#define ICE_AQ_LLDP_DCBX_NA 0
+#define ICE_AQ_LLDP_DCBX_IEEE 1
+#define ICE_AQ_LLDP_DCBX_CEE 2
/* The following bytes are reserved for the Get LLDP MIB command (0x0A00)
* and in the LLDP MIB Change Event (0x0A01). They are valid for the
* Get LLDP MIB (0x0A00) response only.
*/
- u8 reserved1;
+ u8 state;
+#define ICE_AQ_LLDP_MIB_CHANGE_STATE_S 0
+#define ICE_AQ_LLDP_MIB_CHANGE_STATE_M \
+ (0x1 << ICE_AQ_LLDP_MIB_CHANGE_STATE_S)
+#define ICE_AQ_LLDP_MIB_CHANGE_EXECUTED 0
+#define ICE_AQ_LLDP_MIB_CHANGE_PENDING 1
__le16 local_len;
__le16 remote_len;
- u8 reserved2[2];
+ u8 reserved[2];
__le32 addr_high;
__le32 addr_low;
};
@@ -2016,6 +2027,11 @@ struct ice_aqc_lldp_set_mib_change {
u8 command;
#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1
+#define ICE_AQ_LLDP_MIB_PENDING_S 1
+#define ICE_AQ_LLDP_MIB_PENDING_M \
+ (0x1 << ICE_AQ_LLDP_MIB_PENDING_S)
+#define ICE_AQ_LLDP_MIB_PENDING_DISABLE 0
+#define ICE_AQ_LLDP_MIB_PENDING_ENABLE 1
u8 reserved[15];
};
diff --git a/drivers/net/ice/base/ice_dcb.c b/drivers/net/ice/base/ice_dcb.c
index 7a850e62f4..d511a5f5ec 100644
--- a/drivers/net/ice/base/ice_dcb.c
+++ b/drivers/net/ice/base/ice_dcb.c
@@ -74,6 +74,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
if (!ena_update)
cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS;
+ else
+ cmd->command |= ICE_AQ_LLDP_MIB_PENDING_ENABLE <<
+ ICE_AQ_LLDP_MIB_PENDING_S;
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 62/70] net/ice/base: add function to parse DCBX config
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (60 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 61/70] net/ice/base: complete pending LLDP MIB Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:31 ` [PATCH v2 63/70] net/ice/base: handle default VSI lookup type Qi Zhang
` (8 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Anatolii Gerasymenko
The LLDP MIB Change Event (opcode 0x0A01) already carries the changed
MIB in its buffer. Add the ice_get_dcb_cfg_from_mib_change() function,
which sets the local/remote DCBX configuration directly from that
event buffer.
This function will be used by a base driver handler for the LLDP MIB
Change Event.
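A hedged sketch of such a handler (the wrapper name and plumbing are
assumptions; only ice_get_dcb_cfg_from_mib_change() is added by this
patch):

/* Sketch of a base driver AQ event handler for opcode 0x0A01 */
static void example_handle_lldp_mib_change(struct ice_port_info *pi,
					   struct ice_rq_event_info *event)
{
	/* Refresh local/remote DCBX config straight from the event
	 * buffer instead of issuing an extra Get LLDP MIB (0x0A00).
	 */
	ice_get_dcb_cfg_from_mib_change(pi, event);
}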
Signed-off-by: Anatolii Gerasymenko <anatolii.gerasymenko@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_dcb.c | 37 ++++++++++++++++++++++++++++++++++
drivers/net/ice/base/ice_dcb.h | 2 ++
2 files changed, 39 insertions(+)
diff --git a/drivers/net/ice/base/ice_dcb.c b/drivers/net/ice/base/ice_dcb.c
index d511a5f5ec..30494e868e 100644
--- a/drivers/net/ice/base/ice_dcb.c
+++ b/drivers/net/ice/base/ice_dcb.c
@@ -970,6 +970,43 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
return ret;
}
+/**
+ * ice_get_dcb_cfg_from_mib_change
+ * @pi: port information structure
+ * @event: pointer to the admin queue receive event
+ *
+ * Set DCB configuration from received MIB Change event
+ */
+void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
+ struct ice_rq_event_info *event)
+{
+ struct ice_dcbx_cfg *dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
+ struct ice_aqc_lldp_get_mib *mib;
+ u8 change_type, dcbx_mode;
+
+ mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+
+ change_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
+ if (change_type == ICE_AQ_LLDP_MIB_REMOTE)
+ dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
+
+ dcbx_mode = ((mib->type & ICE_AQ_LLDP_DCBX_M) >>
+ ICE_AQ_LLDP_DCBX_S);
+
+ switch (dcbx_mode) {
+ case ICE_AQ_LLDP_DCBX_IEEE:
+ dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
+ ice_lldp_to_dcb_cfg(event->msg_buf, dcbx_cfg);
+ break;
+
+ case ICE_AQ_LLDP_DCBX_CEE:
+ pi->qos_cfg.desired_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg;
+ ice_cee_to_dcb_cfg((struct ice_aqc_get_cee_dcb_cfg_resp *)
+ event->msg_buf, pi);
+ break;
+ }
+}
+
/**
* ice_init_dcb
* @hw: pointer to the HW struct
diff --git a/drivers/net/ice/base/ice_dcb.h b/drivers/net/ice/base/ice_dcb.h
index 24c8da2dc8..7e1e4d0297 100644
--- a/drivers/net/ice/base/ice_dcb.h
+++ b/drivers/net/ice/base/ice_dcb.h
@@ -205,6 +205,8 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
+void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
+ struct ice_rq_event_info *event);
enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg);
enum ice_status
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 63/70] net/ice/base: handle default VSI lookup type
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (61 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 62/70] net/ice/base: add function to parse DCBX config Qi Zhang
@ 2022-08-15 7:31 ` Qi Zhang
2022-08-15 7:32 ` [PATCH v2 64/70] net/ice/base: convert 1588 structs to use bitfields Qi Zhang
` (7 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:31 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Lukasz Kupczak
Handle the ICE_SW_LKUP_DFLT lookup type in ice_update_vsi_list_rule()
and ice_aq_alloc_free_vsi_list(), so the default VSI lookup is treated
like the other VSI-list-capable lookup types.
Signed-off-by: Lukasz Kupczak <lukasz.kupczak@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_switch.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index 01441211ff..afc0fff84b 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -3181,6 +3181,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_DFLT ||
lkup_type == ICE_SW_LKUP_LAST) {
sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
@@ -4095,6 +4096,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_DFLT ||
lkup_type == ICE_SW_LKUP_LAST)
rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
ICE_AQC_SW_RULES_T_VSI_LIST_SET;
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 64/70] net/ice/base: convert 1588 structs to use bitfields
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (62 preceding siblings ...)
2022-08-15 7:31 ` [PATCH v2 63/70] net/ice/base: handle default VSI lookup type Qi Zhang
@ 2022-08-15 7:32 ` Qi Zhang
2022-08-15 7:32 ` [PATCH v2 65/70] net/ice/base: remove unnecessary fields Qi Zhang
` (6 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:32 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Karol Kolacinski
Use bitfields for the single-bit flags in the 1588 capability structs
so they do not waste space.
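For illustration only (standalone C, not driver code), the saving can
be seen by comparing byte-wide flags with 1-bit bitfields:

#include <stdio.h>
#include <stdint.h>

/* Six separate u8 flags: at least 6 bytes of storage. */
struct flags_bytes {
	uint8_t a, b, c, d, e, f;
};

/* The same six flags packed as 1-bit fields into a single byte. */
struct flags_bits {
	uint8_t a : 1, b : 1, c : 1, d : 1, e : 1, f : 1;
};

int main(void)
{
	printf("bytes: %zu, bitfields: %zu\n",
	       sizeof(struct flags_bytes), sizeof(struct flags_bits));
	return 0;
}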
Signed-off-by: Karol Kolacinski <karol.kolacinski@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_type.h | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index 6d0adf0dd1..043dae7781 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -662,12 +662,12 @@ struct ice_ts_func_info {
/* Function specific info */
enum ice_time_ref_freq time_ref;
u8 clk_freq;
- u8 clk_src;
- u8 tmr_index_assoc;
- u8 ena;
- u8 tmr_index_owned;
- u8 src_tmr_owned;
- u8 tmr_ena;
+ u8 clk_src : 1;
+ u8 tmr_index_assoc : 1;
+ u8 ena : 1;
+ u8 tmr_index_owned : 1;
+ u8 src_tmr_owned : 1;
+ u8 tmr_ena : 1;
};
/* Device specific definitions */
@@ -685,14 +685,14 @@ struct ice_ts_dev_info {
/* Device specific info */
u32 ena_ports;
u32 tmr_own_map;
- u32 tmr0_owner;
- u32 tmr1_owner;
- u8 tmr0_owned;
- u8 tmr1_owned;
- u8 ena;
- u8 tmr0_ena;
- u8 tmr1_ena;
- u8 ts_ll_read;
+ u8 tmr0_owner;
+ u8 tmr1_owner;
+ u8 tmr0_owned : 1;
+ u8 tmr1_owned : 1;
+ u8 ena : 1;
+ u8 tmr0_ena : 1;
+ u8 tmr1_ena : 1;
+ u8 ts_ll_read : 1;
};
/* Function specific capabilities */
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 65/70] net/ice/base: remove unnecessary fields
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (63 preceding siblings ...)
2022-08-15 7:32 ` [PATCH v2 64/70] net/ice/base: convert 1588 structs to use bitfields Qi Zhang
@ 2022-08-15 7:32 ` Qi Zhang
2022-08-15 7:32 ` [PATCH v2 66/70] net/ice/base: add GTP tunnel Qi Zhang
` (5 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:32 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Karol Kolacinski
Remove unnecessary fields from the data structures for the 1588 and
QoS function capabilities.
Signed-off-by: Karol Kolacinski <karol.kolacinski@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_common.c | 5 ++---
drivers/net/ice/base/ice_switch.c | 2 --
drivers/net/ice/base/ice_type.h | 6 ------
3 files changed, 2 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index 44592d20bf..3d4e05f2b0 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -2579,7 +2579,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
* related information.
*/
ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
- info->clk_freq);
+ clk_freq);
info->time_ref = ICE_TIME_REF_FREQ_25_000;
}
@@ -2594,7 +2594,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
info->tmr_index_assoc);
ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
- info->clk_freq);
+ clk_freq);
ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
info->clk_src);
}
@@ -2752,7 +2752,6 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
struct ice_aqc_list_caps_elem *cap)
{
struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
- u32 logical_id = LE32_TO_CPU(cap->logical_id);
u32 phys_id = LE32_TO_CPU(cap->phys_id);
u32 number = LE32_TO_CPU(cap->number);
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index afc0fff84b..ac045790ad 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -3546,8 +3546,6 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
pi->sw_id = swid;
pi->pf_vf_num = pf_vf_num;
pi->is_vf = is_vf;
- pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
- pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
break;
default:
ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index 043dae7781..fc5b4b4c5c 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -661,7 +661,6 @@ enum ice_clk_src {
struct ice_ts_func_info {
/* Function specific info */
enum ice_time_ref_freq time_ref;
- u8 clk_freq;
u8 clk_src : 1;
u8 tmr_index_assoc : 1;
u8 ena : 1;
@@ -683,7 +682,6 @@ struct ice_ts_func_info {
struct ice_ts_dev_info {
/* Device specific info */
- u32 ena_ports;
u32 tmr_own_map;
u8 tmr0_owner;
u8 tmr1_owner;
@@ -1098,10 +1096,6 @@ struct ice_port_info {
#define ICE_SCHED_PORT_STATE_READY 0x1
u8 lport;
#define ICE_LPORT_MASK 0xff
- u16 dflt_tx_vsi_rule_id;
- u16 dflt_tx_vsi_num;
- u16 dflt_rx_vsi_rule_id;
- u16 dflt_rx_vsi_num;
struct ice_fc_info fc;
struct ice_mac_info mac;
struct ice_phy_info phy;
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 66/70] net/ice/base: add GTP tunnel
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (64 preceding siblings ...)
2022-08-15 7:32 ` [PATCH v2 65/70] net/ice/base: remove unnecessary fields Qi Zhang
@ 2022-08-15 7:32 ` Qi Zhang
2022-08-15 7:32 ` [PATCH v2 67/70] net/ice/base: check for PTP HW lock more frequently Qi Zhang
` (4 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:32 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Marcin Szycik, Michal Swiatkowski
Added the GTPC and GTPU tunnel types and re-ordered the code to align
with the kernel driver.
Signed-off-by: Marcin Szycik <marcin.szycik@intel.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_flex_type.h | 10 +++++++---
drivers/net/ice/base/ice_protocol_type.h | 2 +-
drivers/net/ice/base/ice_switch.c | 8 ++++----
drivers/net/ice/base/ice_switch.h | 3 +++
4 files changed, 15 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ice/base/ice_flex_type.h b/drivers/net/ice/base/ice_flex_type.h
index 070d2aeb1e..988a2db958 100644
--- a/drivers/net/ice/base/ice_flex_type.h
+++ b/drivers/net/ice/base/ice_flex_type.h
@@ -442,8 +442,10 @@ enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
TNL_GRETAP,
- TNL_ECPRI,
TNL_GTP,
+ TNL_GTPC,
+ TNL_GTPU,
+ TNL_ECPRI,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
};
@@ -724,8 +726,10 @@ enum ice_prof_type {
ICE_PROF_NON_TUN = 0x1,
ICE_PROF_TUN_UDP = 0x2,
ICE_PROF_TUN_GRE = 0x4,
- ICE_PROF_TUN_PPPOE = 0x8,
- ICE_PROF_TUN_ALL = 0xE,
+ ICE_PROF_TUN_GTPU = 0x8,
+ ICE_PROF_TUN_GTPC = 0x10,
+ ICE_PROF_TUN_PPPOE = 0x20,
+ ICE_PROF_TUN_ALL = 0x3E,
ICE_PROF_ALL = 0xFF,
};
diff --git a/drivers/net/ice/base/ice_protocol_type.h b/drivers/net/ice/base/ice_protocol_type.h
index 74107de988..da1f65fb22 100644
--- a/drivers/net/ice/base/ice_protocol_type.h
+++ b/drivers/net/ice/base/ice_protocol_type.h
@@ -45,13 +45,13 @@ enum ice_protocol_type {
ICE_VXLAN_GPE,
ICE_NVGRE,
ICE_GTP,
+ ICE_GTP_NO_PAY,
ICE_PPPOE,
ICE_PFCP,
ICE_L2TPV3,
ICE_ESP,
ICE_AH,
ICE_NAT_T,
- ICE_GTP_NO_PAY,
ICE_VLAN_EX,
ICE_VLAN_IN,
ICE_FLG_DIR,
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index ac045790ad..bb7e76bd29 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -8556,10 +8556,6 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
len = sizeof(struct ice_udp_tnl_hdr);
break;
- case ICE_GTP:
- case ICE_GTP_NO_PAY:
- len = sizeof(struct ice_udp_gtp_hdr);
- break;
case ICE_PPPOE:
len = sizeof(struct ice_pppoe_hdr);
break;
@@ -8575,6 +8571,10 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_L2TPV3:
len = sizeof(struct ice_l2tpv3_sess_hdr);
break;
+ case ICE_GTP:
+ case ICE_GTP_NO_PAY:
+ len = sizeof(struct ice_udp_gtp_hdr);
+ break;
default:
return ICE_ERR_PARAM;
}
diff --git a/drivers/net/ice/base/ice_switch.h b/drivers/net/ice/base/ice_switch.h
index 3c05a1531f..dbad9363c4 100644
--- a/drivers/net/ice/base/ice_switch.h
+++ b/drivers/net/ice/base/ice_switch.h
@@ -28,7 +28,10 @@
#define ICE_PROFID_PPPOE_IPV6_UDP 39
#define ICE_PROFID_PPPOE_IPV6_OTHER 40
#define ICE_PROFID_IPV4_GTPC_TEID 41
+#define ICE_PROFID_IPV4_GTPC_NO_TEID 42
#define ICE_PROFID_IPV4_GTPU_TEID 43
+#define ICE_PROFID_IPV6_GTPC_TEID 44
+#define ICE_PROFID_IPV6_GTPC_NO_TEID 45
#define ICE_PROFID_IPV6_GTPU_TEID 46
#define ICE_PROFID_IPV4_GTPU_EH_IPV4_OTHER 47
#define ICE_PROFID_IPV4_GTPU_IPV4_OTHER 48
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 67/70] net/ice/base: check for PTP HW lock more frequently
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (65 preceding siblings ...)
2022-08-15 7:32 ` [PATCH v2 66/70] net/ice/base: add GTP tunnel Qi Zhang
@ 2022-08-15 7:32 ` Qi Zhang
2022-08-15 7:32 ` [PATCH v2 68/70] net/ice/base: expose API for move sched element Qi Zhang
` (3 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:32 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Karol Kolacinski
The PTP HW semaphore can be held for ~50 ms in the worst case, so SW
should wait longer overall and poll more frequently while the lock is
held: 15 retries with a 5 ms delay (up to 75 ms) instead of 5 retries
with a 10 ms delay (up to 50 ms).
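For illustration, the intended usage pattern around the semaphore is
sketched below; ice_ptp_unlock() and ICE_ERR_NOT_READY are assumed to
exist outside this patch:

/* Sketch: guard shared PTP timer access with the HW semaphore */
static enum ice_status example_with_ptp_lock(struct ice_hw *hw)
{
	if (!ice_ptp_lock(hw))
		return ICE_ERR_NOT_READY;

	/* ... access shared PTP timer registers here ... */

	ice_ptp_unlock(hw);
	return ICE_SUCCESS;
}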
Signed-off-by: Karol Kolacinski <karol.kolacinski@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_ptp_hw.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index f5ebf5f328..974c96f60c 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -4844,18 +4844,18 @@ bool ice_ptp_lock(struct ice_hw *hw)
u32 hw_lock;
int i;
-#define MAX_TRIES 5
+#define MAX_TRIES 15
for (i = 0; i < MAX_TRIES; i++) {
hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
if (hw_lock) {
/* Somebody is holding the lock */
- ice_msec_delay(10, true);
+ ice_msec_delay(5, true);
continue;
- } else {
- break;
}
+
+ break;
}
return !hw_lock;
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 68/70] net/ice/base: expose API for move sched element
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (66 preceding siblings ...)
2022-08-15 7:32 ` [PATCH v2 67/70] net/ice/base: check for PTP HW lock more frequently Qi Zhang
@ 2022-08-15 7:32 ` Qi Zhang
2022-08-15 7:32 ` [PATCH v2 69/70] net/ice/base: couple code clean Qi Zhang
` (2 subsequent siblings)
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:32 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Ben Shelton
Exposed ice_aq_move_sched_elems() so that scheduler elements can be
moved via the AQ command (0x0408) from outside ice_sched.c.
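A hedged usage sketch of the newly exported call; the buffer layout of
struct ice_aqc_move_elem (parent TEIDs plus an array of element TEIDs)
is assumed here and must be filled per ice_adminq_cmd.h:

/* Sketch: move one pre-built group of scheduler elements via AQ */
static enum ice_status
example_move_sched_group(struct ice_hw *hw, struct ice_aqc_move_elem *buf,
			 u16 buf_size)
{
	u16 grps_movd = 0;
	enum ice_status status;

	/* One group per request here; FW reports how many were moved */
	status = ice_aq_move_sched_elems(hw, 1, buf, buf_size,
					 &grps_movd, NULL);
	if (!status && grps_movd != 1)
		status = ICE_ERR_CFG;

	return status;
}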
Signed-off-by: Ben Shelton <benjamin.h.shelton@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_sched.c | 2 +-
drivers/net/ice/base/ice_sched.h | 10 +++++++---
2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ice/base/ice_sched.c b/drivers/net/ice/base/ice_sched.c
index f87b1c4897..3162b528c0 100644
--- a/drivers/net/ice/base/ice_sched.c
+++ b/drivers/net/ice/base/ice_sched.c
@@ -440,7 +440,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* Move scheduling elements (0x0408)
*/
-static enum ice_status
+enum ice_status
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd)
diff --git a/drivers/net/ice/base/ice_sched.h b/drivers/net/ice/base/ice_sched.h
index 53a68dbe51..3793fd3df7 100644
--- a/drivers/net/ice/base/ice_sched.h
+++ b/drivers/net/ice/base/ice_sched.h
@@ -89,6 +89,10 @@ ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_nodes,
struct ice_aqc_cfg_l2_node_cgd_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
+ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
+ struct ice_aqc_move_elem *buf, u16 buf_size,
+ u16 *grps_movd, struct ice_sq_cd *cd);
+enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
@@ -176,12 +180,12 @@ enum ice_status
ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id,
u8 tc);
enum ice_status
-ice_sched_cfg_sibl_node_prio_lock(struct ice_port_info *pi,
- struct ice_sched_node *node, u8 priority);
-enum ice_status
ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
u8 *q_prio);
enum ice_status
+ice_sched_cfg_sibl_node_prio_lock(struct ice_port_info *pi,
+ struct ice_sched_node *node, u8 priority);
+enum ice_status
ice_cfg_q_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw_alloc);
enum ice_status
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 69/70] net/ice/base: couple code clean
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (67 preceding siblings ...)
2022-08-15 7:32 ` [PATCH v2 68/70] net/ice/base: expose API for move sched element Qi Zhang
@ 2022-08-15 7:32 ` Qi Zhang
2022-08-15 7:32 ` [PATCH v2 70/70] net/ice/base: update copyright Qi Zhang
2022-08-22 5:36 ` [PATCH v2 00/70] ice base code update Yang, Qiming
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:32 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang
1. Remove unused code.
2. Reduce variable scope.
3. Fix comments.
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/ice_common.c | 2 +-
drivers/net/ice/base/ice_flex_pipe.c | 20 ++++++--------------
drivers/net/ice/base/ice_flow.c | 1 -
drivers/net/ice/base/ice_nvm.c | 2 +-
drivers/net/ice/base/ice_ptp_hw.c | 13 +++++--------
drivers/net/ice/base/ice_sched.c | 12 ++++++++----
drivers/net/ice/base/ice_switch.c | 7 +++++--
7 files changed, 26 insertions(+), 31 deletions(-)
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index 3d4e05f2b0..29d4be6618 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -801,7 +801,7 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
* LFC. Thus, we will use index =
* PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
*
- * Also, because we are opearating on transmit timer and fc
+ * Also, because we are operating on transmit timer and fc
* threshold of LFC, we don't turn on any bit in tx_tmr_priority
*/
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index 63ddda2df9..aec6ec3323 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -8,17 +8,6 @@
#include "ice_protocol_type.h"
#include "ice_flow.h"
-/* For supporting double VLAN mode, it is necessary to enable or disable certain
- * boost tcam entries. The metadata labels names that match the following
- * prefixes will be saved to allow enabling double VLAN mode.
- */
-#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
-#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
-
-/* To support tunneling entries by PF, the package will append the PF number to
- * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
- */
-#define ICE_TNL_PRE "TNL_"
static const struct ice_tunnel_type_scan tnls[] = {
{ TNL_VXLAN, "TNL_VXLAN_PF" },
{ TNL_GENEVE, "TNL_GENEVE_PF" },
@@ -526,10 +515,11 @@ ice_upd_dvm_boost_entry(struct ice_hw *hw, struct ice_dvm_entry *entry)
*/
enum ice_status ice_set_dvm_boost_entries(struct ice_hw *hw)
{
- enum ice_status status;
u16 i;
for (i = 0; i < hw->dvm_upd.count; i++) {
+ enum ice_status status;
+
status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]);
if (status)
return status;
@@ -3414,12 +3404,13 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_vsi *vsi_cur;
struct ice_vsig_prof *d, *t;
- enum ice_status status;
/* remove TCAM entries */
LIST_FOR_EACH_ENTRY_SAFE(d, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
ice_vsig_prof, list) {
+ enum ice_status status;
+
status = ice_rem_prof_id(hw, blk, d);
if (status)
return status;
@@ -3469,12 +3460,13 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
{
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_prof *p, *t;
- enum ice_status status;
LIST_FOR_EACH_ENTRY_SAFE(p, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
ice_vsig_prof, list)
if (p->profile_cookie == hdl) {
+ enum ice_status status;
+
if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
/* this is the last profile, remove the VSIG */
return ice_rem_vsig(hw, blk, vsig, chg);
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
index 33e97ec333..8a44823895 100644
--- a/drivers/net/ice/base/ice_flow.c
+++ b/drivers/net/ice/base/ice_flow.c
@@ -2583,7 +2583,6 @@ ice_flow_set_hw_prof(struct ice_hw *hw, u16 dest_vsi_handle,
struct ice_flow_prof_params *params;
u8 fv_words = hw->blk[blk].es.fvw;
enum ice_status status;
- u16 vsi_num;
int i, idx;
params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
diff --git a/drivers/net/ice/base/ice_nvm.c b/drivers/net/ice/base/ice_nvm.c
index 293b71905d..25a38e1610 100644
--- a/drivers/net/ice/base/ice_nvm.c
+++ b/drivers/net/ice/base/ice_nvm.c
@@ -725,7 +725,6 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_orom_civd_info *civd)
{
struct ice_orom_civd_info tmp;
- enum ice_status status;
u32 offset;
/* The CIVD section is located in the Option ROM aligned to 512 bytes.
@@ -734,6 +733,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
* equal 0.
*/
for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) {
+ enum ice_status status;
u8 sum = 0, i;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR,
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index 974c96f60c..e6b21809e0 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -4805,10 +4805,7 @@ enum ice_status ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
bool ice_is_pca9575_present(struct ice_hw *hw)
{
enum ice_status status;
- __le16 handle = 0;
-
- if (!ice_is_e810t(hw))
- return false;
+ u16 handle = 0;
status = ice_get_pca9575_handle(hw, &handle);
if (!status && handle)
@@ -4819,8 +4816,8 @@ bool ice_is_pca9575_present(struct ice_hw *hw)
/* Device agnostic functions
*
- * The following functions implement shared behavior common to both E822 and
- * E810 devices, possibly calling a device specific implementation where
+ * The following functions implement shared behavior common to both E822/E823
+ * and E810 devices, possibly calling a device specific implementation where
* necessary.
*/
@@ -5164,9 +5161,9 @@ ice_ptp_adj_clock_at_time(struct ice_hw *hw, u64 at_time, s32 adj)
}
/**
- * ice_read_phy_tstamp - Read a PHY timestamp from the timestamo block
+ * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block
* @hw: pointer to the HW struct
- * @block: the block to read from
+ * @block: the block/port to read from
* @idx: the timestamp index to read
* @tstamp: on return, the 40bit timestamp value
*
diff --git a/drivers/net/ice/base/ice_sched.c b/drivers/net/ice/base/ice_sched.c
index 3162b528c0..0e2e26d95e 100644
--- a/drivers/net/ice/base/ice_sched.c
+++ b/drivers/net/ice/base/ice_sched.c
@@ -1708,7 +1708,6 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
{
struct ice_sched_node *parent, *node;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
u32 first_node_teid;
u16 num_added = 0;
u8 i, qgl, vsil;
@@ -1717,6 +1716,8 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
vsil = ice_sched_get_vsi_layer(hw);
parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
+ enum ice_status status;
+
if (!parent)
return ICE_ERR_CFG;
@@ -1810,7 +1811,6 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
struct ice_sched_node *parent = tc_node;
- enum ice_status status;
u32 first_node_teid;
u16 num_added = 0;
u8 i, vsil;
@@ -1820,6 +1820,8 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
+ enum ice_status status;
+
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
i, num_nodes[i],
&first_node_teid,
@@ -4860,7 +4862,6 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc)
{
struct ice_sched_node *node = NULL;
- struct ice_sched_node *child_node;
switch (agg_type) {
case ICE_AGG_TYPE_VSI: {
@@ -4891,13 +4892,16 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
node = ice_sched_find_node_by_teid(pi->root, id);
break;
- case ICE_AGG_TYPE_QG:
+ case ICE_AGG_TYPE_QG: {
+ struct ice_sched_node *child_node;
+
/* The current implementation allows single qg to modify */
child_node = ice_sched_find_node_by_teid(pi->root, id);
if (!child_node)
break;
node = child_node->parent;
break;
+ }
default:
break;
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index bb7e76bd29..1cf8faeed5 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -4255,11 +4255,12 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
*/
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
- struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *fm_entry;
enum ice_status status = ICE_SUCCESS;
struct LIST_HEAD_TYPE *rule_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+ struct ice_switch_info *sw;
+ sw = hw->switch_info;
rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
@@ -9536,10 +9537,12 @@ enum ice_status
ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
u16 vsi_handle)
{
- struct ice_switch_info *sw = hw->switch_info;
+ struct ice_switch_info *sw;
enum ice_status status;
u8 i;
+ sw = hw->switch_info;
+
/* Update the recipes that were created */
for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
struct LIST_HEAD_TYPE *head;
--
2.31.1
^ permalink raw reply [flat|nested] 149+ messages in thread
* [PATCH v2 70/70] net/ice/base: update copyright
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (68 preceding siblings ...)
2022-08-15 7:32 ` [PATCH v2 69/70] net/ice/base: couple code clean Qi Zhang
@ 2022-08-15 7:32 ` Qi Zhang
2022-08-22 5:36 ` [PATCH v2 00/70] ice base code update Yang, Qiming
70 siblings, 0 replies; 149+ messages in thread
From: Qi Zhang @ 2022-08-15 7:32 UTC (permalink / raw)
To: qiming.yang; +Cc: dev, Qi Zhang, Paul M . Stillwell Jr
Updated the copyright year to 2022 and updated the base code version.
Signed-off-by: Paul M. Stillwell Jr <paul.m.stillwell.jr@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/base/README | 4 ++--
drivers/net/ice/base/ice_acl.c | 2 +-
drivers/net/ice/base/ice_acl.h | 2 +-
drivers/net/ice/base/ice_acl_ctrl.c | 2 +-
drivers/net/ice/base/ice_adminq_cmd.h | 2 +-
drivers/net/ice/base/ice_alloc.h | 2 +-
drivers/net/ice/base/ice_bitops.h | 2 +-
drivers/net/ice/base/ice_bst_tcam.c | 2 +-
drivers/net/ice/base/ice_bst_tcam.h | 2 +-
drivers/net/ice/base/ice_cgu_regs.h | 2 +-
drivers/net/ice/base/ice_common.c | 2 +-
drivers/net/ice/base/ice_common.h | 2 +-
drivers/net/ice/base/ice_controlq.c | 2 +-
drivers/net/ice/base/ice_controlq.h | 2 +-
drivers/net/ice/base/ice_dcb.c | 2 +-
drivers/net/ice/base/ice_dcb.h | 2 +-
drivers/net/ice/base/ice_devids.h | 2 +-
drivers/net/ice/base/ice_fdir.c | 2 +-
drivers/net/ice/base/ice_fdir.h | 2 +-
drivers/net/ice/base/ice_flex_pipe.c | 2 +-
drivers/net/ice/base/ice_flex_pipe.h | 2 +-
drivers/net/ice/base/ice_flex_type.h | 2 +-
drivers/net/ice/base/ice_flg_rd.c | 2 +-
drivers/net/ice/base/ice_flg_rd.h | 2 +-
drivers/net/ice/base/ice_flow.c | 2 +-
drivers/net/ice/base/ice_flow.h | 2 +-
drivers/net/ice/base/ice_hw_autogen.h | 2 +-
drivers/net/ice/base/ice_imem.c | 2 +-
drivers/net/ice/base/ice_imem.h | 2 +-
drivers/net/ice/base/ice_lan_tx_rx.h | 2 +-
drivers/net/ice/base/ice_metainit.c | 2 +-
drivers/net/ice/base/ice_metainit.h | 2 +-
drivers/net/ice/base/ice_mk_grp.c | 2 +-
drivers/net/ice/base/ice_mk_grp.h | 2 +-
drivers/net/ice/base/ice_nvm.c | 2 +-
drivers/net/ice/base/ice_nvm.h | 2 +-
drivers/net/ice/base/ice_osdep.h | 2 +-
drivers/net/ice/base/ice_parser.c | 2 +-
drivers/net/ice/base/ice_parser.h | 2 +-
drivers/net/ice/base/ice_parser_rt.c | 2 +-
drivers/net/ice/base/ice_parser_rt.h | 2 +-
drivers/net/ice/base/ice_parser_util.h | 2 +-
drivers/net/ice/base/ice_pg_cam.c | 2 +-
drivers/net/ice/base/ice_pg_cam.h | 2 +-
drivers/net/ice/base/ice_proto_grp.c | 2 +-
drivers/net/ice/base/ice_proto_grp.h | 2 +-
drivers/net/ice/base/ice_protocol_type.h | 2 +-
drivers/net/ice/base/ice_ptp_consts.h | 2 +-
drivers/net/ice/base/ice_ptp_hw.c | 2 +-
drivers/net/ice/base/ice_ptp_hw.h | 2 +-
drivers/net/ice/base/ice_ptype_mk.c | 2 +-
drivers/net/ice/base/ice_ptype_mk.h | 2 +-
drivers/net/ice/base/ice_sbq_cmd.h | 2 +-
drivers/net/ice/base/ice_sched.c | 2 +-
drivers/net/ice/base/ice_sched.h | 2 +-
drivers/net/ice/base/ice_status.h | 2 +-
drivers/net/ice/base/ice_switch.c | 2 +-
drivers/net/ice/base/ice_switch.h | 2 +-
drivers/net/ice/base/ice_tmatch.h | 2 +-
drivers/net/ice/base/ice_type.h | 2 +-
drivers/net/ice/base/ice_vlan_mode.c | 2 +-
drivers/net/ice/base/ice_vlan_mode.h | 2 +-
drivers/net/ice/base/ice_xlt_kb.c | 2 +-
drivers/net/ice/base/ice_xlt_kb.h | 2 +-
64 files changed, 65 insertions(+), 65 deletions(-)
diff --git a/drivers/net/ice/base/README b/drivers/net/ice/base/README
index 8123ec8c30..0e37a5f7a1 100644
--- a/drivers/net/ice/base/README
+++ b/drivers/net/ice/base/README
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2020-2021 Intel Corporation
+ * Copyright(c) 2020-2022 Intel Corporation
*/
Intel® ICE driver
==================
This directory contains source code of FreeBSD ice driver of version
-2021.11.02 released by the team which develops
+2022.08.04 released by the team which develops
basic drivers for any ice NIC. The directory of base/ contains the
original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/ice/base/ice_acl.c b/drivers/net/ice/base/ice_acl.c
index 6e1d1ad393..23b6c608be 100644
--- a/drivers/net/ice/base/ice_acl.c
+++ b/drivers/net/ice/base/ice_acl.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_acl.h"
diff --git a/drivers/net/ice/base/ice_acl.h b/drivers/net/ice/base/ice_acl.h
index a63b90faf8..b5f2ec04a4 100644
--- a/drivers/net/ice/base/ice_acl.h
+++ b/drivers/net/ice/base/ice_acl.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_ACL_H_
diff --git a/drivers/net/ice/base/ice_acl_ctrl.c b/drivers/net/ice/base/ice_acl_ctrl.c
index 2dd08e326e..3a912f2aa0 100644
--- a/drivers/net/ice/base/ice_acl_ctrl.c
+++ b/drivers/net/ice/base/ice_acl_ctrl.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_acl.h"
diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index dc3c3269d4..5a817982b4 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_ADMINQ_CMD_H_
diff --git a/drivers/net/ice/base/ice_alloc.h b/drivers/net/ice/base/ice_alloc.h
index 7fca491ac6..dca502ab25 100644
--- a/drivers/net/ice/base/ice_alloc.h
+++ b/drivers/net/ice/base/ice_alloc.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_ALLOC_H_
diff --git a/drivers/net/ice/base/ice_bitops.h b/drivers/net/ice/base/ice_bitops.h
index 8060c103fa..c4ae2b9c8e 100644
--- a/drivers/net/ice/base/ice_bitops.h
+++ b/drivers/net/ice/base/ice_bitops.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_BITOPS_H_
diff --git a/drivers/net/ice/base/ice_bst_tcam.c b/drivers/net/ice/base/ice_bst_tcam.c
index 74a2de869e..5cc0d12251 100644
--- a/drivers/net/ice/base/ice_bst_tcam.c
+++ b/drivers/net/ice/base/ice_bst_tcam.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_bst_tcam.h b/drivers/net/ice/base/ice_bst_tcam.h
index e4c96c439d..292444c919 100644
--- a/drivers/net/ice/base/ice_bst_tcam.h
+++ b/drivers/net/ice/base/ice_bst_tcam.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_BST_TCAM_H_
diff --git a/drivers/net/ice/base/ice_cgu_regs.h b/drivers/net/ice/base/ice_cgu_regs.h
index 6751481e83..6b9a359c5b 100644
--- a/drivers/net/ice/base/ice_cgu_regs.h
+++ b/drivers/net/ice/base/ice_cgu_regs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_CGU_REGS_H_
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index 29d4be6618..5391bd666b 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index 9101ae11af..58260afb93 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_COMMON_H_
diff --git a/drivers/net/ice/base/ice_controlq.c b/drivers/net/ice/base/ice_controlq.c
index 8a6311572c..8971a140ef 100644
--- a/drivers/net/ice/base/ice_controlq.c
+++ b/drivers/net/ice/base/ice_controlq.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_controlq.h b/drivers/net/ice/base/ice_controlq.h
index 840fb5e22e..45fe70450e 100644
--- a/drivers/net/ice/base/ice_controlq.h
+++ b/drivers/net/ice/base/ice_controlq.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_CONTROLQ_H_
diff --git a/drivers/net/ice/base/ice_dcb.c b/drivers/net/ice/base/ice_dcb.c
index 30494e868e..0e604df541 100644
--- a/drivers/net/ice/base/ice_dcb.c
+++ b/drivers/net/ice/base/ice_dcb.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_dcb.h b/drivers/net/ice/base/ice_dcb.h
index 7e1e4d0297..d010c539a6 100644
--- a/drivers/net/ice/base/ice_dcb.h
+++ b/drivers/net/ice/base/ice_dcb.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_DCB_H_
diff --git a/drivers/net/ice/base/ice_devids.h b/drivers/net/ice/base/ice_devids.h
index 937111844d..13a4b16402 100644
--- a/drivers/net/ice/base/ice_devids.h
+++ b/drivers/net/ice/base/ice_devids.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_DEVIDS_H_
diff --git a/drivers/net/ice/base/ice_fdir.c b/drivers/net/ice/base/ice_fdir.c
index a554379075..85ca29bae5 100644
--- a/drivers/net/ice/base/ice_fdir.c
+++ b/drivers/net/ice/base/ice_fdir.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_fdir.h b/drivers/net/ice/base/ice_fdir.h
index ced880fff1..f338da38c2 100644
--- a/drivers/net/ice/base/ice_fdir.h
+++ b/drivers/net/ice/base/ice_fdir.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_FDIR_H_
diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index aec6ec3323..b6bc0062a3 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_flex_pipe.h b/drivers/net/ice/base/ice_flex_pipe.h
index 8fde36dfa6..9ba337e1fa 100644
--- a/drivers/net/ice/base/ice_flex_pipe.h
+++ b/drivers/net/ice/base/ice_flex_pipe.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_FLEX_PIPE_H_
diff --git a/drivers/net/ice/base/ice_flex_type.h b/drivers/net/ice/base/ice_flex_type.h
index 988a2db958..7b8f6f9049 100644
--- a/drivers/net/ice/base/ice_flex_type.h
+++ b/drivers/net/ice/base/ice_flex_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_FLEX_TYPE_H_
diff --git a/drivers/net/ice/base/ice_flg_rd.c b/drivers/net/ice/base/ice_flg_rd.c
index 80d3b51ad6..f320958bd3 100644
--- a/drivers/net/ice/base/ice_flg_rd.c
+++ b/drivers/net/ice/base/ice_flg_rd.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_flg_rd.h b/drivers/net/ice/base/ice_flg_rd.h
index 6c3e01b0fa..8cd375f89f 100644
--- a/drivers/net/ice/base/ice_flg_rd.h
+++ b/drivers/net/ice/base/ice_flg_rd.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_FLG_RD_H_
diff --git a/drivers/net/ice/base/ice_flow.c b/drivers/net/ice/base/ice_flow.c
index 8a44823895..3483a5ed4f 100644
--- a/drivers/net/ice/base/ice_flow.c
+++ b/drivers/net/ice/base/ice_flow.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_flow.h b/drivers/net/ice/base/ice_flow.h
index 5729392362..dba71aab74 100644
--- a/drivers/net/ice/base/ice_flow.h
+++ b/drivers/net/ice/base/ice_flow.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_FLOW_H_
diff --git a/drivers/net/ice/base/ice_hw_autogen.h b/drivers/net/ice/base/ice_hw_autogen.h
index 10b1116931..6dc77bf7cb 100644
--- a/drivers/net/ice/base/ice_hw_autogen.h
+++ b/drivers/net/ice/base/ice_hw_autogen.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
/* Machine generated file. Do not edit. */
diff --git a/drivers/net/ice/base/ice_imem.c b/drivers/net/ice/base/ice_imem.c
index 9a76d21ce5..277311fd20 100644
--- a/drivers/net/ice/base/ice_imem.c
+++ b/drivers/net/ice/base/ice_imem.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_imem.h b/drivers/net/ice/base/ice_imem.h
index 8b1eccc1b9..06d3d5a96d 100644
--- a/drivers/net/ice/base/ice_imem.h
+++ b/drivers/net/ice/base/ice_imem.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_IMEM_H_
diff --git a/drivers/net/ice/base/ice_lan_tx_rx.h b/drivers/net/ice/base/ice_lan_tx_rx.h
index ba1b9a66d8..be6d88f0ca 100644
--- a/drivers/net/ice/base/ice_lan_tx_rx.h
+++ b/drivers/net/ice/base/ice_lan_tx_rx.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_LAN_TX_RX_H_
diff --git a/drivers/net/ice/base/ice_metainit.c b/drivers/net/ice/base/ice_metainit.c
index a899125b37..b75d68c010 100644
--- a/drivers/net/ice/base/ice_metainit.c
+++ b/drivers/net/ice/base/ice_metainit.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_metainit.h b/drivers/net/ice/base/ice_metainit.h
index d46f1d7b47..dad4faf4d4 100644
--- a/drivers/net/ice/base/ice_metainit.h
+++ b/drivers/net/ice/base/ice_metainit.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_METAINIT_H_
diff --git a/drivers/net/ice/base/ice_mk_grp.c b/drivers/net/ice/base/ice_mk_grp.c
index 814001c49e..cafe51544d 100644
--- a/drivers/net/ice/base/ice_mk_grp.c
+++ b/drivers/net/ice/base/ice_mk_grp.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_mk_grp.h b/drivers/net/ice/base/ice_mk_grp.h
index 04d11b49c2..9401647ef0 100644
--- a/drivers/net/ice/base/ice_mk_grp.h
+++ b/drivers/net/ice/base/ice_mk_grp.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_MK_GRP_H_
diff --git a/drivers/net/ice/base/ice_nvm.c b/drivers/net/ice/base/ice_nvm.c
index 25a38e1610..6550dda557 100644
--- a/drivers/net/ice/base/ice_nvm.c
+++ b/drivers/net/ice/base/ice_nvm.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_nvm.h b/drivers/net/ice/base/ice_nvm.h
index 52e8853b19..a8cda452db 100644
--- a/drivers/net/ice/base/ice_nvm.h
+++ b/drivers/net/ice/base/ice_nvm.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_NVM_H_
diff --git a/drivers/net/ice/base/ice_osdep.h b/drivers/net/ice/base/ice_osdep.h
index 8160eb68ee..4b92057521 100644
--- a/drivers/net/ice/base/ice_osdep.h
+++ b/drivers/net/ice/base/ice_osdep.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2021 Intel Corporation
+ * Copyright(c) 2018-2022 Intel Corporation
*/
#ifndef _ICE_OSDEP_H_
diff --git a/drivers/net/ice/base/ice_parser.c b/drivers/net/ice/base/ice_parser.c
index 5a461d83be..a1b906d369 100644
--- a/drivers/net/ice/base/ice_parser.c
+++ b/drivers/net/ice/base/ice_parser.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_parser.h b/drivers/net/ice/base/ice_parser.h
index 22c73b686b..b4c5e7b14d 100644
--- a/drivers/net/ice/base/ice_parser.h
+++ b/drivers/net/ice/base/ice_parser.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_PARSER_H_
diff --git a/drivers/net/ice/base/ice_parser_rt.c b/drivers/net/ice/base/ice_parser_rt.c
index 22cd748248..215e11abd2 100644
--- a/drivers/net/ice/base/ice_parser_rt.c
+++ b/drivers/net/ice/base/ice_parser_rt.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_parser_rt.h b/drivers/net/ice/base/ice_parser_rt.h
index 28350d018a..c1e1af0059 100644
--- a/drivers/net/ice/base/ice_parser_rt.h
+++ b/drivers/net/ice/base/ice_parser_rt.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_PARSER_RT_H_
diff --git a/drivers/net/ice/base/ice_parser_util.h b/drivers/net/ice/base/ice_parser_util.h
index cf0222bed8..a33d6bf11c 100644
--- a/drivers/net/ice/base/ice_parser_util.h
+++ b/drivers/net/ice/base/ice_parser_util.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_PARSER_UTIL_H_
diff --git a/drivers/net/ice/base/ice_pg_cam.c b/drivers/net/ice/base/ice_pg_cam.c
index 73f7c34ffd..f06a3581a0 100644
--- a/drivers/net/ice/base/ice_pg_cam.c
+++ b/drivers/net/ice/base/ice_pg_cam.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_pg_cam.h b/drivers/net/ice/base/ice_pg_cam.h
index aeadc20a77..ac0863afb0 100644
--- a/drivers/net/ice/base/ice_pg_cam.h
+++ b/drivers/net/ice/base/ice_pg_cam.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_PG_CAM_H_
diff --git a/drivers/net/ice/base/ice_proto_grp.c b/drivers/net/ice/base/ice_proto_grp.c
index 5dbe07d258..a9ed9e051f 100644
--- a/drivers/net/ice/base/ice_proto_grp.c
+++ b/drivers/net/ice/base/ice_proto_grp.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_proto_grp.h b/drivers/net/ice/base/ice_proto_grp.h
index 1a5b5d5f44..762d32464b 100644
--- a/drivers/net/ice/base/ice_proto_grp.h
+++ b/drivers/net/ice/base/ice_proto_grp.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_PROTO_GRP_H_
diff --git a/drivers/net/ice/base/ice_protocol_type.h b/drivers/net/ice/base/ice_protocol_type.h
index da1f65fb22..d17ab54bd3 100644
--- a/drivers/net/ice/base/ice_protocol_type.h
+++ b/drivers/net/ice/base/ice_protocol_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_PROTOCOL_TYPE_H_
diff --git a/drivers/net/ice/base/ice_ptp_consts.h b/drivers/net/ice/base/ice_ptp_consts.h
index 32eb60ab48..ddf6242d8e 100644
--- a/drivers/net/ice/base/ice_ptp_consts.h
+++ b/drivers/net/ice/base/ice_ptp_consts.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_PTP_CONSTS_H_
diff --git a/drivers/net/ice/base/ice_ptp_hw.c b/drivers/net/ice/base/ice_ptp_hw.c
index e6b21809e0..a0b8af1b94 100644
--- a/drivers/net/ice/base/ice_ptp_hw.c
+++ b/drivers/net/ice/base/ice_ptp_hw.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_type.h"
diff --git a/drivers/net/ice/base/ice_ptp_hw.h b/drivers/net/ice/base/ice_ptp_hw.h
index 9fa17787df..09c236e7e0 100644
--- a/drivers/net/ice/base/ice_ptp_hw.h
+++ b/drivers/net/ice/base/ice_ptp_hw.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_PTP_HW_H_
diff --git a/drivers/net/ice/base/ice_ptype_mk.c b/drivers/net/ice/base/ice_ptype_mk.c
index 9807e688b1..4cd8396167 100644
--- a/drivers/net/ice/base/ice_ptype_mk.c
+++ b/drivers/net/ice/base/ice_ptype_mk.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_ptype_mk.h b/drivers/net/ice/base/ice_ptype_mk.h
index 2cd49b1b63..3efe294dda 100644
--- a/drivers/net/ice/base/ice_ptype_mk.h
+++ b/drivers/net/ice/base/ice_ptype_mk.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_PTYPE_MK_H_
diff --git a/drivers/net/ice/base/ice_sbq_cmd.h b/drivers/net/ice/base/ice_sbq_cmd.h
index 76c718b252..a215303a56 100644
--- a/drivers/net/ice/base/ice_sbq_cmd.h
+++ b/drivers/net/ice/base/ice_sbq_cmd.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_SBQ_CMD_H_
diff --git a/drivers/net/ice/base/ice_sched.c b/drivers/net/ice/base/ice_sched.c
index 0e2e26d95e..a526c8f32c 100644
--- a/drivers/net/ice/base/ice_sched.c
+++ b/drivers/net/ice/base/ice_sched.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_sched.h"
diff --git a/drivers/net/ice/base/ice_sched.h b/drivers/net/ice/base/ice_sched.h
index 3793fd3df7..3724ef33a8 100644
--- a/drivers/net/ice/base/ice_sched.h
+++ b/drivers/net/ice/base/ice_sched.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_SCHED_H_
diff --git a/drivers/net/ice/base/ice_status.h b/drivers/net/ice/base/ice_status.h
index 9df878be01..f52121c3a3 100644
--- a/drivers/net/ice/base/ice_status.h
+++ b/drivers/net/ice/base/ice_status.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_STATUS_H_
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index 1cf8faeed5..4b115ce660 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_switch.h b/drivers/net/ice/base/ice_switch.h
index dbad9363c4..01b49595d2 100644
--- a/drivers/net/ice/base/ice_switch.h
+++ b/drivers/net/ice/base/ice_switch.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_SWITCH_H_
diff --git a/drivers/net/ice/base/ice_tmatch.h b/drivers/net/ice/base/ice_tmatch.h
index 178a084639..e70926acd1 100644
--- a/drivers/net/ice/base/ice_tmatch.h
+++ b/drivers/net/ice/base/ice_tmatch.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_TMATCH_H_
diff --git a/drivers/net/ice/base/ice_type.h b/drivers/net/ice/base/ice_type.h
index fc5b4b4c5c..54a753545e 100644
--- a/drivers/net/ice/base/ice_type.h
+++ b/drivers/net/ice/base/ice_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_TYPE_H_
diff --git a/drivers/net/ice/base/ice_vlan_mode.c b/drivers/net/ice/base/ice_vlan_mode.c
index d1003a5a89..74d414b3b8 100644
--- a/drivers/net/ice/base/ice_vlan_mode.c
+++ b/drivers/net/ice/base/ice_vlan_mode.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_vlan_mode.h b/drivers/net/ice/base/ice_vlan_mode.h
index 0e41b84000..5e3f454a25 100644
--- a/drivers/net/ice/base/ice_vlan_mode.h
+++ b/drivers/net/ice/base/ice_vlan_mode.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_VLAN_MODE_H_
diff --git a/drivers/net/ice/base/ice_xlt_kb.c b/drivers/net/ice/base/ice_xlt_kb.c
index 5efe209cad..59472a08d4 100644
--- a/drivers/net/ice/base/ice_xlt_kb.c
+++ b/drivers/net/ice/base/ice_xlt_kb.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#include "ice_common.h"
diff --git a/drivers/net/ice/base/ice_xlt_kb.h b/drivers/net/ice/base/ice_xlt_kb.h
index ec802b663a..f870f18ed6 100644
--- a/drivers/net/ice/base/ice_xlt_kb.h
+++ b/drivers/net/ice/base/ice_xlt_kb.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2021 Intel Corporation
+ * Copyright(c) 2001-2022 Intel Corporation
*/
#ifndef _ICE_XLT_KB_H_
--
2.31.1
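The hunks above are purely mechanical: every touched file only has the year range in its SPDX copyright notice bumped from 2021 (or 2018, in the case of ice_osdep.h) to 2022. A sweep of this shape is normally scripted rather than edited by hand; a minimal Python sketch of such an update could look like the following (the base path, file pattern and year values are illustrative assumptions, not something taken from the patch):

    #!/usr/bin/env python3
    # Illustrative sketch only (not part of the patch): bump the SPDX
    # copyright year range in the ice base code from ...-2021 to ...-2022.
    import pathlib
    import re

    BASE = pathlib.Path("drivers/net/ice/base")  # assumed DPDK source layout
    OLD = re.compile(r"Copyright\(c\) (2001|2018)-2021 Intel Corporation")

    for path in sorted(BASE.glob("ice_*")):
        if path.suffix not in (".c", ".h"):
            continue
        text = path.read_text()
        new = OLD.sub(lambda m: f"Copyright(c) {m.group(1)}-2022 Intel Corporation",
                      text)
        if new != text:
            path.write_text(new)
            print(f"updated {path}")

Run from the DPDK source root, a script like this would rewrite only files whose header still carries the old year range and leave everything else untouched.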
* RE: [PATCH v2 00/70] ice base code update
2022-08-15 7:30 ` [PATCH v2 00/70] ice base code update Qi Zhang
` (69 preceding siblings ...)
2022-08-15 7:32 ` [PATCH v2 70/70] net/ice/base: update copyright Qi Zhang
@ 2022-08-22 5:36 ` Yang, Qiming
2022-09-01 13:11 ` Zhang, Qi Z
70 siblings, 1 reply; 149+ messages in thread
From: Yang, Qiming @ 2022-08-22 5:36 UTC (permalink / raw)
To: Zhang, Qi Z; +Cc: dev
> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: Monday, August 15, 2022 3:31 PM
> To: Yang, Qiming <qiming.yang@intel.com>
> Cc: dev@dpdk.org; Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: [PATCH v2 00/70] ice base code update
>
> Update ice base code to the 2022-Aug internal release.
>
> Summary:
>
> 1. Baseline support for L2TPv2 FDIR/RSS.
> 2. Refactor DDP module.
> 3. Support 56G PHY.
> 4. Add GTP/GRE tunnel.
> 5. Clean up code and fix bugs.
> 6. Update copyright.
>
>
> v2:
> - fix a couple of patchwork warnings.
>
> Qi Zhang (70):
> net/ice/base: add netlist helper functions
> net/ice/base: get NVM CSS Header length from the CSS Header
> net/ice/base: combine functions for VSI promisc
> net/ice/base: make function names more generic
> net/ice/base: fix incorrect division during E822 PTP init
> net/ice/base: added auto drop blocking packets functionality
> net/ice/base: fix 100M speed
> net/ice/base: support VXLAN and GRE for RSS
> net/ice/base: fix DSCP PFC TLV creation
> net/ice/base: complete the health status codes
> net/ice/base: explicitly name E822 HW-dependent functions
> net/ice/base: move code block
> net/ice/base: add PHY 56G destination address
> net/ice/base: add 56G PHY register definitions
> net/ice/base: implement 56G PHY access functions
> net/ice/base: implement 56G PHY setup functions
> net/ice/base: work around missing PTP caps
> net/ice/base: enable calling of ETH56G functions
> net/ice/base: fix PHY type 10G SFI C2C to media type mapping
> net/ice/base: refactor DDP code
> net/ice/base: add E822 generic PCI device ID
> net/ice/base: support double VLAN rules
> net/ice/base: report NVM version numbers on mismatch
> net/ice/base: create duplicate detection for ACL rules
> net/ice/base: fix incorrect function descriptions for parser
> net/ice/base: fix endian format
> net/ice/base: convert IO expander handle to u16
> net/ice/base: convert array of u8 to bitmap
> net/ice/base: fix array overflow in add switch recipe code
> net/ice/base: fix bit finding range over ptype bitmap
> net/ice/base: move function to internal
> net/ice/base: change PHY/QUAD/ports definitions
> net/ice/base: add AQ command to config node attribute
> net/ice/base: fix null pointer dereference during
> net/ice/base: refine default VSI config
> net/ice/base: fix add mac rule
> net/ice/base: support Tx topo config
> net/ice/base: adjust the VSI/Aggregator layers
> net/ice/base: add data typecasting to match sizes
> net/ice/base: add helper function to check if device is E823
> net/ice/base: add low latency Tx timestamp read
> net/ice/base: fix double VLAN error in promisc mode
> net/ice/base: move functions
> net/ice/base: complete support for Tx balancing
> net/ice/base: update definitions for AQ internal debug dump
> net/ice/base: update macros of L2TPv2 ptype value
> net/ice/base: refine header file include
> net/ice/base: ignore already exist error
> net/ice/base: clean up with no lookups
> net/ice/base: add support for Auto FEC with FEC disabled
> net/ice/base: update PHY type high max index
> net/ice/base: clean the main timer command register
> net/ice/base: add support for custom WPC and LGB NICs
> net/ice/base: add generic MAC with 3K signature segment
> net/ice/base: enable RSS support for L2TPv2 session ID
> net/ice/base: enable FDIR support for L2TPv2
> net/ice/base: add GRE Tap tunnel type
> net/ice/base: fix wrong inputset of GTPoGRE packet
> net/ice/base: add unload flag for control queue shutdown
> net/ice/base: update comment for overloaded GCO bit
> net/ice/base: complete pending LLDP MIB
> net/ice/base: add function to parse DCBX config
> net/ice/base: handle default VSI lookup type
> net/ice/base: convert 1588 structs to use bitfields
> net/ice/base: remove unnecessary fields
> net/ice/base: add GTP tunnel
> net/ice/base: check for PTP HW lock more frequently
> net/ice/base: expose API for move sched element
> net/ice/base: couple code clean
> net/ice/base: update copyright
>
> drivers/net/ice/base/README | 4 +-
> drivers/net/ice/base/ice_acl.c | 2 +-
> drivers/net/ice/base/ice_acl.h | 2 +-
> drivers/net/ice/base/ice_acl_ctrl.c | 36 +-
> drivers/net/ice/base/ice_adminq_cmd.h | 175 +-
> drivers/net/ice/base/ice_alloc.h | 2 +-
> drivers/net/ice/base/ice_bitops.h | 7 +-
> drivers/net/ice/base/ice_bst_tcam.c | 8 +-
> drivers/net/ice/base/ice_bst_tcam.h | 2 +-
> drivers/net/ice/base/ice_cgu_regs.h | 2 +-
> drivers/net/ice/base/ice_common.c | 371 ++-
> drivers/net/ice/base/ice_common.h | 22 +-
> drivers/net/ice/base/ice_controlq.c | 33 +-
> drivers/net/ice/base/ice_controlq.h | 2 +-
> drivers/net/ice/base/ice_dcb.c | 52 +-
> drivers/net/ice/base/ice_dcb.h | 4 +-
> drivers/net/ice/base/ice_ddp.c | 2475 ++++++++++++++++++++
> drivers/net/ice/base/ice_ddp.h | 466 ++++
> drivers/net/ice/base/ice_defs.h | 49 +
> drivers/net/ice/base/ice_devids.h | 9 +-
> drivers/net/ice/base/ice_fdir.c | 812 ++++++-
> drivers/net/ice/base/ice_fdir.h | 28 +-
> drivers/net/ice/base/ice_flex_pipe.c | 2541 +++------------------
> drivers/net/ice/base/ice_flex_pipe.h | 66 +-
> drivers/net/ice/base/ice_flex_type.h | 359 +--
> drivers/net/ice/base/ice_flg_rd.c | 6 +-
> drivers/net/ice/base/ice_flg_rd.h | 2 +-
> drivers/net/ice/base/ice_flow.c | 100 +-
> drivers/net/ice/base/ice_flow.h | 16 +-
> drivers/net/ice/base/ice_hw_autogen.h | 2 +-
> drivers/net/ice/base/ice_imem.c | 6 +-
> drivers/net/ice/base/ice_imem.h | 2 +-
> drivers/net/ice/base/ice_lan_tx_rx.h | 4 +-
> drivers/net/ice/base/ice_metainit.c | 6 +-
> drivers/net/ice/base/ice_metainit.h | 2 +-
> drivers/net/ice/base/ice_mk_grp.c | 6 +-
> drivers/net/ice/base/ice_mk_grp.h | 2 +-
> drivers/net/ice/base/ice_nvm.c | 67 +-
> drivers/net/ice/base/ice_nvm.h | 2 +-
> drivers/net/ice/base/ice_osdep.h | 2 +-
> drivers/net/ice/base/ice_parser.c | 9 +-
> drivers/net/ice/base/ice_parser.h | 2 +-
> drivers/net/ice/base/ice_parser_rt.c | 2 +-
> drivers/net/ice/base/ice_parser_rt.h | 2 +-
> drivers/net/ice/base/ice_parser_util.h | 2 +-
> drivers/net/ice/base/ice_pg_cam.c | 14 +-
> drivers/net/ice/base/ice_pg_cam.h | 2 +-
> drivers/net/ice/base/ice_proto_grp.c | 6 +-
> drivers/net/ice/base/ice_proto_grp.h | 2 +-
> drivers/net/ice/base/ice_protocol_type.h | 4 +-
> drivers/net/ice/base/ice_ptp_consts.h | 2 +-
> drivers/net/ice/base/ice_ptp_hw.c | 2618 +++++++++++++++++-----
> drivers/net/ice/base/ice_ptp_hw.h | 149 +-
> drivers/net/ice/base/ice_ptype_mk.c | 6 +-
> drivers/net/ice/base/ice_ptype_mk.h | 2 +-
> drivers/net/ice/base/ice_sbq_cmd.h | 3 +-
> drivers/net/ice/base/ice_sched.c | 106 +-
> drivers/net/ice/base/ice_sched.h | 19 +-
> drivers/net/ice/base/ice_status.h | 2 +-
> drivers/net/ice/base/ice_switch.c | 986 ++++----
> drivers/net/ice/base/ice_switch.h | 9 +-
> drivers/net/ice/base/ice_tmatch.h | 2 +-
> drivers/net/ice/base/ice_type.h | 200 +-
> drivers/net/ice/base/ice_vlan_mode.c | 3 +-
> drivers/net/ice/base/ice_vlan_mode.h | 2 +-
> drivers/net/ice/base/ice_xlt_kb.c | 12 +-
> drivers/net/ice/base/ice_xlt_kb.h | 2 +-
> drivers/net/ice/base/meson.build | 1 +
> drivers/net/ice/ice_ethdev.c | 5 +-
> 69 files changed, 7890 insertions(+), 4038 deletions(-)
> create mode 100644 drivers/net/ice/base/ice_ddp.c
> create mode 100644 drivers/net/ice/base/ice_ddp.h
> create mode 100644 drivers/net/ice/base/ice_defs.h
>
> --
> 2.31.1
Acked-by: Qiming Yang <qiming.yang@intel.com>
* RE: [PATCH v2 00/70] ice base code update
2022-08-22 5:36 ` [PATCH v2 00/70] ice base code update Yang, Qiming
@ 2022-09-01 13:11 ` Zhang, Qi Z
0 siblings, 0 replies; 149+ messages in thread
From: Zhang, Qi Z @ 2022-09-01 13:11 UTC (permalink / raw)
To: Yang, Qiming; +Cc: dev
> -----Original Message-----
> From: Yang, Qiming <qiming.yang@intel.com>
> Sent: Monday, August 22, 2022 1:37 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH v2 00/70] ice base code update
>
>
>
> > -----Original Message-----
> > From: Zhang, Qi Z <qi.z.zhang@intel.com>
> > Sent: Monday, August 15, 2022 3:31 PM
> > To: Yang, Qiming <qiming.yang@intel.com>
> > Cc: dev@dpdk.org; Zhang, Qi Z <qi.z.zhang@intel.com>
> > Subject: [PATCH v2 00/70] ice base code update
> >
> > Update ice base code to the 2022-Aug internal release.
> >
> > Summary:
> >
> > 1. Baseline support for L2TPv2 FDIR/RSS.
> > 2. Refactor DDP module.
> > 3. Support 56G PHY.
> > 4. Add GTP/GRE tunnel.
> > 5. Clean up code and fix bugs.
> > 6. Update copyright.
> >
> >
> > v2:
> > - fix a couple of patchwork warnings.
> >
> > Qi Zhang (70):
> > net/ice/base: add netlist helper functions
> > net/ice/base: get NVM CSS Header length from the CSS Header
> > net/ice/base: combine functions for VSI promisc
> > net/ice/base: make function names more generic
> > net/ice/base: fix incorrect division during E822 PTP init
> > net/ice/base: added auto drop blocking packets functionality
> > net/ice/base: fix 100M speed
> > net/ice/base: support VXLAN and GRE for RSS
> > net/ice/base: fix DSCP PFC TLV creation
> > net/ice/base: complete the health status codes
> > net/ice/base: explicitly name E822 HW-dependent functions
> > net/ice/base: move code block
> > net/ice/base: add PHY 56G destination address
> > net/ice/base: add 56G PHY register definitions
> > net/ice/base: implement 56G PHY access functions
> > net/ice/base: implement 56G PHY setup functions
> > net/ice/base: work around missing PTP caps
> > net/ice/base: enable calling of ETH56G functions
> > net/ice/base: fix PHY type 10G SFI C2C to media type mapping
> > net/ice/base: refactor DDP code
> > net/ice/base: add E822 generic PCI device ID
> > net/ice/base: support double VLAN rules
> > net/ice/base: report NVM version numbers on mismatch
> > net/ice/base: create duplicate detection for ACL rules
> > net/ice/base: fix incorrect function descriptions for parser
> > net/ice/base: fix endian format
> > net/ice/base: convert IO expander handle to u16
> > net/ice/base: convert array of u8 to bitmap
> > net/ice/base: fix array overflow in add switch recipe code
> > net/ice/base: fix bit finding range over ptype bitmap
> > net/ice/base: move function to internal
> > net/ice/base: change PHY/QUAD/ports definitions
> > net/ice/base: add AQ command to config node attribute
> > net/ice/base: fix null pointer dereference during
> > net/ice/base: refine default VSI config
> > net/ice/base: fix add mac rule
> > net/ice/base: support Tx topo config
> > net/ice/base: adjust the VSI/Aggregator layers
> > net/ice/base: add data typecasting to match sizes
> > net/ice/base: add helper function to check if device is E823
> > net/ice/base: add low latency Tx timestamp read
> > net/ice/base: fix double VLAN error in promisc mode
> > net/ice/base: move functions
> > net/ice/base: complete support for Tx balancing
> > net/ice/base: update definitions for AQ internal debug dump
> > net/ice/base: update macros of L2TPv2 ptype value
> > net/ice/base: refine header file include
> > net/ice/base: ignore already exist error
> > net/ice/base: clean up with no lookups
> > net/ice/base: add support for Auto FEC with FEC disabled
> > net/ice/base: update PHY type high max index
> > net/ice/base: clean the main timer command register
> > net/ice/base: add support for custom WPC and LGB NICs
> > net/ice/base: add generic MAC with 3K signature segment
> > net/ice/base: enable RSS support for L2TPv2 session ID
> > net/ice/base: enable FDIR support for L2TPv2
> > net/ice/base: add GRE Tap tunnel type
> > net/ice/base: fix wrong inputset of GTPoGRE packet
> > net/ice/base: add unload flag for control queue shutdown
> > net/ice/base: update comment for overloaded GCO bit
> > net/ice/base: complete pending LLDP MIB
> > net/ice/base: add function to parse DCBX config
> > net/ice/base: handle default VSI lookup type
> > net/ice/base: convert 1588 structs to use bitfields
> > net/ice/base: remove unnecessary fields
> > net/ice/base: add GTP tunnel
> > net/ice/base: check for PTP HW lock more frequently
> > net/ice/base: expose API for move sched element
> > net/ice/base: couple code clean
> > net/ice/base: update copyright
> >
> > drivers/net/ice/base/README | 4 +-
> > drivers/net/ice/base/ice_acl.c | 2 +-
> > drivers/net/ice/base/ice_acl.h | 2 +-
> > drivers/net/ice/base/ice_acl_ctrl.c | 36 +-
> > drivers/net/ice/base/ice_adminq_cmd.h | 175 +-
> > drivers/net/ice/base/ice_alloc.h | 2 +-
> > drivers/net/ice/base/ice_bitops.h | 7 +-
> > drivers/net/ice/base/ice_bst_tcam.c | 8 +-
> > drivers/net/ice/base/ice_bst_tcam.h | 2 +-
> > drivers/net/ice/base/ice_cgu_regs.h | 2 +-
> > drivers/net/ice/base/ice_common.c | 371 ++-
> > drivers/net/ice/base/ice_common.h | 22 +-
> > drivers/net/ice/base/ice_controlq.c | 33 +-
> > drivers/net/ice/base/ice_controlq.h | 2 +-
> > drivers/net/ice/base/ice_dcb.c | 52 +-
> > drivers/net/ice/base/ice_dcb.h | 4 +-
> > drivers/net/ice/base/ice_ddp.c | 2475 ++++++++++++++++++++
> > drivers/net/ice/base/ice_ddp.h | 466 ++++
> > drivers/net/ice/base/ice_defs.h | 49 +
> > drivers/net/ice/base/ice_devids.h | 9 +-
> > drivers/net/ice/base/ice_fdir.c | 812 ++++++-
> > drivers/net/ice/base/ice_fdir.h | 28 +-
> > drivers/net/ice/base/ice_flex_pipe.c | 2541 +++------------------
> > drivers/net/ice/base/ice_flex_pipe.h | 66 +-
> > drivers/net/ice/base/ice_flex_type.h | 359 +--
> > drivers/net/ice/base/ice_flg_rd.c | 6 +-
> > drivers/net/ice/base/ice_flg_rd.h | 2 +-
> > drivers/net/ice/base/ice_flow.c | 100 +-
> > drivers/net/ice/base/ice_flow.h | 16 +-
> > drivers/net/ice/base/ice_hw_autogen.h | 2 +-
> > drivers/net/ice/base/ice_imem.c | 6 +-
> > drivers/net/ice/base/ice_imem.h | 2 +-
> > drivers/net/ice/base/ice_lan_tx_rx.h | 4 +-
> > drivers/net/ice/base/ice_metainit.c | 6 +-
> > drivers/net/ice/base/ice_metainit.h | 2 +-
> > drivers/net/ice/base/ice_mk_grp.c | 6 +-
> > drivers/net/ice/base/ice_mk_grp.h | 2 +-
> > drivers/net/ice/base/ice_nvm.c | 67 +-
> > drivers/net/ice/base/ice_nvm.h | 2 +-
> > drivers/net/ice/base/ice_osdep.h | 2 +-
> > drivers/net/ice/base/ice_parser.c | 9 +-
> > drivers/net/ice/base/ice_parser.h | 2 +-
> > drivers/net/ice/base/ice_parser_rt.c | 2 +-
> > drivers/net/ice/base/ice_parser_rt.h | 2 +-
> > drivers/net/ice/base/ice_parser_util.h | 2 +-
> > drivers/net/ice/base/ice_pg_cam.c | 14 +-
> > drivers/net/ice/base/ice_pg_cam.h | 2 +-
> > drivers/net/ice/base/ice_proto_grp.c | 6 +-
> > drivers/net/ice/base/ice_proto_grp.h | 2 +-
> > drivers/net/ice/base/ice_protocol_type.h | 4 +-
> > drivers/net/ice/base/ice_ptp_consts.h | 2 +-
> > drivers/net/ice/base/ice_ptp_hw.c | 2618 +++++++++++++++++-----
> > drivers/net/ice/base/ice_ptp_hw.h | 149 +-
> > drivers/net/ice/base/ice_ptype_mk.c | 6 +-
> > drivers/net/ice/base/ice_ptype_mk.h | 2 +-
> > drivers/net/ice/base/ice_sbq_cmd.h | 3 +-
> > drivers/net/ice/base/ice_sched.c | 106 +-
> > drivers/net/ice/base/ice_sched.h | 19 +-
> > drivers/net/ice/base/ice_status.h | 2 +-
> > drivers/net/ice/base/ice_switch.c | 986 ++++----
> > drivers/net/ice/base/ice_switch.h | 9 +-
> > drivers/net/ice/base/ice_tmatch.h | 2 +-
> > drivers/net/ice/base/ice_type.h | 200 +-
> > drivers/net/ice/base/ice_vlan_mode.c | 3 +-
> > drivers/net/ice/base/ice_vlan_mode.h | 2 +-
> > drivers/net/ice/base/ice_xlt_kb.c | 12 +-
> > drivers/net/ice/base/ice_xlt_kb.h | 2 +-
> > drivers/net/ice/base/meson.build | 1 +
> > drivers/net/ice/ice_ethdev.c | 5 +-
> > 69 files changed, 7890 insertions(+), 4038 deletions(-)
> > create mode 100644 drivers/net/ice/base/ice_ddp.c
> > create mode 100644 drivers/net/ice/base/ice_ddp.h
> > create mode 100644 drivers/net/ice/base/ice_defs.h
> >
> > --
> > 2.31.1
>
> Acked-by: Qiming Yang <qiming.yang@intel.com>
Applied to dpdk-next-net-intel.
Thanks
Qi