[PATCH v1 2/2] net/ice: add Tx Time queue context configuration support
From: Soumyadeep Hore @ 2025-01-17 9:39 UTC
To: bruce.richardson, aman.deep.singh; +Cc: dev, Paul Greenwalt
From: Paul Greenwalt <paul.greenwalt@intel.com>
Tx Packet Pacing (TXPP) operates in addition to the regular Tx LAN queue
flow, so it requires its own, separate Tx Time queue context.
Add the Tx Time queue context data structures and two new related AQCs
(a usage sketch for the new wrappers follows the sign-off section below):
- ice_aq_set_txtimeq (opcode 0x0C35) to set Tx Time queue(s) context
- ice_aq_operate_txtimeq (opcode 0x0C37) to enable/disable Tx Time
  queue(s)
Signed-off-by: Paul Greenwalt <paul.greenwalt@intel.com>
Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
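For reviewers, a minimal usage sketch (not part of the diff below): it shows
how a caller might program and then enable one Tx Time queue with the two new
wrappers. The helper name ice_cfg_and_ena_txtime_q(), the chosen context
field values and the error handling are illustrative assumptions; only
ice_set_ctx(), ice_struct_size() and ice_malloc()/ice_free() are existing
base-code helpers.

static enum ice_status
ice_cfg_and_ena_txtime_q(struct ice_hw *hw, u16 q_id, u64 ring_base, u16 qlen)
{
	struct ice_aqc_ena_dis_txtime_qgrp ena_qg = { 0 };
	struct ice_aqc_set_txtime_qgrp *set_qg;
	struct ice_txtime_ctx ctx = { 0 };
	enum ice_status status;
	u16 buf_size;

	/* one queue per group: the command buffer is variable length */
	buf_size = ice_struct_size(set_qg, txtimeqs, 1);
	set_qg = (struct ice_aqc_set_txtime_qgrp *)ice_malloc(hw, buf_size);
	if (!set_qg)
		return ICE_ERR_NO_MEMORY;

	/* base is programmed in 128-byte units */
	ctx.base = ring_base >> ICE_TXTIME_CTX_BASE_S;
	ctx.qlen = qlen;
	ctx.txtime_ena_q = 1;
	ctx.vmvf_type = ICE_TXTIME_CTX_VMVF_TYPE_PF;

	/* pack the context into the 25-byte AQ buffer, LSB first */
	ice_set_ctx(hw, (u8 *)&ctx, set_qg->txtimeqs[0].txtime_ctx,
		    ice_txtime_ctx_info);

	status = ice_aq_set_txtimeq(hw, q_id, 1, set_qg, buf_size, NULL);
	if (!status)
		status = ice_aq_ena_dis_txtimeq(hw, q_id, 1, true, &ena_qg,
						NULL);

	ice_free(hw, set_qg);
	return status;
}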
drivers/net/ice/base/ice_adminq_cmd.h | 55 +++++++++++++++
drivers/net/ice/base/ice_common.c | 96 +++++++++++++++++++++++++++
drivers/net/ice/base/ice_common.h | 9 +++
drivers/net/ice/base/ice_lan_tx_rx.h | 37 +++++++++++
4 files changed, 197 insertions(+)
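One more reviewer note on the context layout: ice_txtime_ctx_info drives the
generic ice_set_ctx() bit-packer, so each struct ice_txtime_ctx is serialized
LSB-first into the 25-byte txtime_ctx area of struct
ice_aqc_set_txtimeqs_perq. A quick consistency sketch (assuming the base
code's struct ice_ctx_ele with its width/lsb members; the helper below is
hypothetical, not part of this series): the table's highest field ends at bit
128 + 70 = 198, which fits the 25 * 8 = 200 available bits.

/* Hypothetical sanity check: the highest bit described by the table must
 * fit the 25-byte txtime_ctx buffer (198 <= 200 for this table).
 */
static bool ice_txtime_ctx_fits(void)
{
	const struct ice_ctx_ele *ce = ice_txtime_ctx_info;
	u16 max_bit = 0;

	for (; ce->width; ce++)	/* table is terminated by a zeroed entry */
		if (ce->lsb + ce->width > max_bit)
			max_bit = ce->lsb + ce->width;

	return max_bit <= 25 * 8;
}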
diff --git a/drivers/net/ice/base/ice_adminq_cmd.h b/drivers/net/ice/base/ice_adminq_cmd.h
index 3ec207927b..1fa8bbad29 100644
--- a/drivers/net/ice/base/ice_adminq_cmd.h
+++ b/drivers/net/ice/base/ice_adminq_cmd.h
@@ -2975,6 +2975,55 @@ struct ice_aqc_move_txqs_data {
struct ice_aqc_move_txqs_elem txqs[STRUCT_HACK_VAR_LEN];
};
+/* Set Tx Time LAN Queue (indirect 0x0C35) */
+struct ice_aqc_set_txtimeqs {
+ __le16 q_id;
+ __le16 q_amount;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is the descriptor of each queue entry for the Set Tx Time Queue
+ * command (0x0C35). Only used within struct ice_aqc_set_txtime_qgrp.
+ */
+struct ice_aqc_set_txtimeqs_perq {
+ u8 reserved[4];
+ u8 txtime_ctx[25];
+ u8 reserved1[3];
+};
+
+/* The format of the command buffer for Set Tx Time Queue (0x0C35)
+ * is an array of the following structs. Please note that the length of
+ * each struct ice_aqc_set_txtime_qgrp is variable due to the variable
+ * number of queues in each group!
+ */
+struct ice_aqc_set_txtime_qgrp {
+ u8 reserved[8];
+ struct ice_aqc_set_txtimeqs_perq txtimeqs[STRUCT_HACK_VAR_LEN];
+};
+
+/* Operate Tx Time Queue (indirect 0x0C37) */
+struct ice_aqc_ena_dis_txtimeqs {
+ __le16 q_id;
+ __le16 q_amount;
+ u8 cmd_type;
+#define ICE_AQC_TXTIME_CMD_TYPE_S 0
+#define ICE_AQC_TXTIME_CMD_TYPE_M (0x1 << ICE_AQC_TXTIME_CMD_TYPE_S)
+#define ICE_AQC_TXTIME_CMD_TYPE_Q_ENA 1
+ u8 reserved[3];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+#pragma pack(1)
+
+struct ice_aqc_ena_dis_txtime_qgrp {
+ u8 reserved[5];
+ __le16 fail_txtime_q;
+ u8 reserved1[1];
+};
+#pragma pack()
+
/* Download Package (indirect 0x0C40) */
/* Also used for Update Package (indirect 0x0C41 and 0x0C42) */
struct ice_aqc_download_pkg {
@@ -3297,6 +3346,8 @@ struct ice_aq_desc {
struct ice_aqc_add_txqs add_txqs;
struct ice_aqc_dis_txqs dis_txqs;
struct ice_aqc_move_txqs move_txqs;
+ struct ice_aqc_set_txtimeqs set_txtimeqs;
+ struct ice_aqc_ena_dis_txtimeqs operate_txtimeqs;
struct ice_aqc_txqs_cleanup txqs_cleanup;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
@@ -3576,6 +3627,10 @@ enum ice_adminq_opc {
ice_aqc_opc_txqs_cleanup = 0x0C31,
ice_aqc_opc_move_recfg_txqs = 0x0C32,
+ /* Tx Time queue commands */
+ ice_aqc_opc_set_txtimeqs = 0x0C35,
+ ice_aqc_opc_ena_dis_txtimeqs = 0x0C37,
+
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
ice_aqc_opc_upload_section = 0x0C41,
diff --git a/drivers/net/ice/base/ice_common.c b/drivers/net/ice/base/ice_common.c
index c8047ca59f..fce9b070cf 100644
--- a/drivers/net/ice/base/ice_common.c
+++ b/drivers/net/ice/base/ice_common.c
@@ -1660,6 +1660,31 @@ ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
return 0;
}
+/* Tx Time queue context */
+const struct ice_ctx_ele ice_txtime_ctx_info[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_txtime_ctx, base, 57, 0),
+ ICE_CTX_STORE(ice_txtime_ctx, pf_num, 3, 57),
+ ICE_CTX_STORE(ice_txtime_ctx, vmvf_num, 10, 60),
+ ICE_CTX_STORE(ice_txtime_ctx, vmvf_type, 2, 70),
+ ICE_CTX_STORE(ice_txtime_ctx, src_vsi, 10, 72),
+ ICE_CTX_STORE(ice_txtime_ctx, cpuid, 8, 82),
+ ICE_CTX_STORE(ice_txtime_ctx, tphrd_desc, 1, 90),
+ ICE_CTX_STORE(ice_txtime_ctx, qlen, 13, 91),
+ ICE_CTX_STORE(ice_txtime_ctx, timer_num, 3, 104),
+ ICE_CTX_STORE(ice_txtime_ctx, txtime_ena_q, 1, 107),
+ ICE_CTX_STORE(ice_txtime_ctx, drbell_mode_32, 1, 108),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_res, 4, 109),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_round_type, 2, 113),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_pacing_slot, 3, 115),
+ ICE_CTX_STORE(ice_txtime_ctx, merging_ena, 1, 118),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_prof_id, 4, 119),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_cache_line_aln_thld, 4, 123),
+ ICE_CTX_STORE(ice_txtime_ctx, tx_pipe_delay_mode, 1, 127),
+ ICE_CTX_STORE(ice_txtime_ctx, int_q_state, 70, 128),
+ { 0 }
+};
+
/* Sideband Queue command wrappers */
/**
@@ -4729,6 +4754,77 @@ ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
return status;
}
+/**
+ * ice_aq_set_txtimeq - set Tx time queues
+ * @hw: pointer to the hardware structure
+ * @txtimeq: first Tx time queue id to configure
+ * @q_count: number of queues to configure
+ * @txtime_qg: queue group to be set
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set Tx Time queue (0x0C35)
+ */
+enum ice_status
+ice_aq_set_txtimeq(struct ice_hw *hw, u16 txtimeq, u8 q_count,
+ struct ice_aqc_set_txtime_qgrp *txtime_qg, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_txtimeqs *cmd;
+ struct ice_aq_desc desc;
+ u16 size;
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ cmd = &desc.params.set_txtimeqs;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_txtimeqs);
+ if (!txtime_qg)
+ return ICE_ERR_PARAM;
+ if (txtimeq > ICE_TXTIME_MAX_QUEUE || q_count < 1 ||
+ q_count > ICE_SET_TXTIME_MAX_Q_AMOUNT)
+ return ICE_ERR_PARAM;
+ size = ice_struct_size(txtime_qg, txtimeqs, q_count);
+ if (buf_size != size)
+ return ICE_ERR_PARAM;
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+ cmd->q_id = CPU_TO_LE16(txtimeq);
+ cmd->q_amount = CPU_TO_LE16(q_count);
+ return ice_aq_send_cmd(hw, &desc, txtime_qg, buf_size, cd);
+}
+
+/**
+ * ice_aq_ena_dis_txtimeq - enable/disable Tx time queue
+ * @hw: pointer to the hardware structure
+ * @txtimeq: first Tx time queue id to configure
+ * @q_count: number of queues to configure
+ * @q_ena: enable/disable Tx time queue
+ * @txtime_qg: on response, holds the ID of the first Tx Time queue that
+ * failed to enable/disable
+ * @cd: pointer to command details structure or NULL
+ *
+ * Enable/disable Tx Time queue (0x0C37)
+ */
+enum ice_status
+ice_aq_ena_dis_txtimeq(struct ice_hw *hw, u16 txtimeq, u16 q_count, bool q_ena,
+ struct ice_aqc_ena_dis_txtime_qgrp *txtime_qg,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_ena_dis_txtimeqs *cmd;
+ struct ice_aq_desc desc;
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ cmd = &desc.params.operate_txtimeqs;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_ena_dis_txtimeqs);
+ if (!txtime_qg)
+ return ICE_ERR_PARAM;
+ if (txtimeq > ICE_TXTIME_MAX_QUEUE || q_count < 1 ||
+ q_count > ICE_OP_TXTIME_MAX_Q_AMOUNT)
+ return ICE_ERR_PARAM;
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+ cmd->q_id = CPU_TO_LE16(txtimeq);
+ cmd->q_amount = CPU_TO_LE16(q_count);
+ if (q_ena)
+ cmd->cmd_type |= ICE_AQC_TXTIME_CMD_TYPE_Q_ENA;
+ return ice_aq_send_cmd(hw, &desc, txtime_qg, sizeof(*txtime_qg), cd);
+}
+
/* End of FW Admin Queue command wrappers */
/**
diff --git a/drivers/net/ice/base/ice_common.h b/drivers/net/ice/base/ice_common.h
index 71b1f34d0a..f7e7ff5aaf 100644
--- a/drivers/net/ice/base/ice_common.h
+++ b/drivers/net/ice/base/ice_common.h
@@ -132,6 +132,15 @@ ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
u8 timeout, u32 *blocked_cgds,
struct ice_aqc_move_txqs_data *buf, u16 buf_size,
u8 *txqs_moved, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_txtimeq(struct ice_hw *hw, u16 txtimeq, u8 q_count,
+ struct ice_aqc_set_txtime_qgrp *txtime_qg,
+ u16 buf_size, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_ena_dis_txtimeq(struct ice_hw *hw, u16 txtimeq, u16 q_count, bool q_ena,
+ struct ice_aqc_ena_dis_txtime_qgrp *txtime_qg,
+ struct ice_sq_cd *cd);
+extern const struct ice_ctx_ele ice_txtime_ctx_info[];
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
diff --git a/drivers/net/ice/base/ice_lan_tx_rx.h b/drivers/net/ice/base/ice_lan_tx_rx.h
index bcc6e9a716..f92382346f 100644
--- a/drivers/net/ice/base/ice_lan_tx_rx.h
+++ b/drivers/net/ice/base/ice_lan_tx_rx.h
@@ -1275,6 +1275,43 @@ struct ice_ts_desc {
};
#define ICE_TS_DESC(R, i) (&(((struct ice_ts_desc *)((R)->desc))[i]))
+#define ICE_TXTIME_MAX_QUEUE 2047
+#define ICE_SET_TXTIME_MAX_Q_AMOUNT 127
+#define ICE_OP_TXTIME_MAX_Q_AMOUNT 2047
+/* Tx Time queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
+struct ice_txtime_ctx {
+#define ICE_TXTIME_CTX_BASE_S 7
+ u64 base; /* base is defined in 128-byte units */
+ u8 pf_num;
+ u16 vmvf_num;
+ u8 vmvf_type;
+#define ICE_TXTIME_CTX_VMVF_TYPE_VF 0
+#define ICE_TXTIME_CTX_VMVF_TYPE_VMQ 1
+#define ICE_TXTIME_CTX_VMVF_TYPE_PF 2
+ u16 src_vsi;
+ u8 cpuid;
+ u8 tphrd_desc;
+ u32 qlen; /* bigger than needed, see above for reason */
+ u8 timer_num;
+ u8 txtime_ena_q;
+ u8 drbell_mode_32;
+#define ICE_TXTIME_CTX_DRBELL_MODE_32 1
+ u8 ts_res;
+ u8 ts_round_type;
+ u8 ts_pacing_slot;
+ u8 merging_ena;
+ u8 ts_fetch_prof_id;
+ u8 ts_fetch_cache_line_aln_thld;
+ u8 tx_pipe_delay_mode;
+ u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
+};
+
/* The ice_ptype_lkup table is used to convert from the 10-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
* packet type.
--
2.43.0