From: Anatoly Burakov
To: dev@dpdk.org
Cc: Piotr Kwapulinski, vladimir.medvedkin@intel.com, bruce.richardson@intel.com, Stefan Wegrzyn, Jedrzej Jagielski
Subject: [PATCH v3 24/30] net/ixgbe/base: add support for NVM handling in E610 device
Date: Thu, 30 May 2024 12:13:57 +0100
Message-ID: <5578167c8ed177a6fba5e559e132990f8107bc60.1717067519.git.anatoly.burakov@intel.com>
X-Mailer: git-send-email 2.43.0
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

From: Piotr Kwapulinski

Add low-level support for accessing the NVM in the E610 device. NVM
operations are handled via the Admin Command Interface.

Signed-off-by: Stefan Wegrzyn
Signed-off-by: Jedrzej Jagielski
Signed-off-by: Piotr Kwapulinski
Signed-off-by: Anatoly Burakov
---
 drivers/net/ixgbe/base/ixgbe_e610.c | 325 ++++++++++++++++++++++++++++
 drivers/net/ixgbe/base/ixgbe_e610.h |  16 ++
 2 files changed, 341 insertions(+)

diff --git a/drivers/net/ixgbe/base/ixgbe_e610.c b/drivers/net/ixgbe/base/ixgbe_e610.c
index f00789e1aa..f36b4aa1a3 100644
--- a/drivers/net/ixgbe/base/ixgbe_e610.c
+++ b/drivers/net/ixgbe/base/ixgbe_e610.c
@@ -1832,6 +1832,253 @@ s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
         return status;
 }
 
+/**
+ * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * Request NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+                      enum ixgbe_aci_res_access_type access)
+{
+        u32 fla;
+
+        /* Skip if we are in blank NVM programming mode */
+        fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+        if ((fla & GLNVM_FLA_LOCKED_M) == 0)
+                return IXGBE_SUCCESS;
+
+        return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
+                                 IXGBE_NVM_TIMEOUT);
+}
+
+/**
+ * ixgbe_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * Release NVM ownership.
+ */
+void ixgbe_release_nvm(struct ixgbe_hw *hw)
+{
+        u32 fla;
+
+        /* Skip if we are in blank NVM programming mode */
+        fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+        if ((fla & GLNVM_FLA_LOCKED_M) == 0)
+                return;
+
+        ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+}
+
+
+/**
+ * ixgbe_aci_read_nvm - read NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tells if this is a shadow RAM read
+ *
+ * Read the NVM using ACI command (0x0701).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+                       u16 length, void *data, bool last_command,
+                       bool read_shadow_ram)
+{
+        struct ixgbe_aci_desc desc;
+        struct ixgbe_aci_cmd_nvm *cmd;
+
+        cmd = &desc.params.nvm;
+
+        if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
+                return IXGBE_ERR_PARAM;
+
+        ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
+
+        if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
+                cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
+
+        /* If this is the last command in a series, set the proper flag. */
+        if (last_command)
+                cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+        cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+        cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
+        cmd->offset_high = (offset >> 16) & 0xFF;
+        cmd->length = IXGBE_CPU_TO_LE16(length);
+
+        return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_nvm_validate_checksum - validate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity using ACI command (0x0706).
+ * If the checksum verification fails, IXGBE_ERR_NVM_CHECKSUM is returned.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
+{
+        struct ixgbe_aci_cmd_nvm_checksum *cmd;
+        struct ixgbe_aci_desc desc;
+        s32 status;
+
+        status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+        if (status)
+                return status;
+
+        cmd = &desc.params.nvm_checksum;
+
+        ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+        cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
+
+        status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+        ixgbe_release_nvm(hw);
+
+        if (!status)
+                if (IXGBE_LE16_TO_CPU(cmd->checksum) !=
+                    IXGBE_ACI_NVM_CHECKSUM_CORRECT) {
+                        ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+                                      "Invalid Shadow Ram checksum");
+                        status = IXGBE_ERR_NVM_CHECKSUM;
+                }
+
+        return status;
+}
+
+/**
+ * ixgbe_nvm_recalculate_checksum - recalculate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Recalculate NVM PFA checksum using ACI command (0x0706).
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw)
+{
+        struct ixgbe_aci_cmd_nvm_checksum *cmd;
+        struct ixgbe_aci_desc desc;
+        s32 status;
+
+        status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+        if (status)
+                return status;
+
+        cmd = &desc.params.nvm_checksum;
+
+        ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+        cmd->flags = IXGBE_ACI_NVM_CHECKSUM_RECALC;
+
+        status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+        ixgbe_release_nvm(hw);
+
+        return status;
+}
+
+/**
+ * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+        u32 bytes = sizeof(u16);
+        __le16 data_local;
+        s32 status;
+
+        status = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+                                     (u8 *)&data_local, true);
+        if (status)
+                return status;
+
+        *data = IXGBE_LE16_TO_CPU(data_local);
+        return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM, as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
+ * from being exceeded in case of Shadow RAM read requests and ensures that no
+ * single read request exceeds the maximum 4KB read for a single admin command.
+ *
+ * Returns a status code on failure. Note that the data pointer may be
+ * partially updated if some reads succeed before a failure.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+                        u8 *data, bool read_shadow_ram)
+{
+        u32 inlen = *length;
+        u32 bytes_read = 0;
+        bool last_cmd;
+        s32 status;
+
+        *length = 0;
+
+        /* Verify the length of the read if this is for the Shadow RAM */
+        if (read_shadow_ram && ((offset + inlen) >
+                                (hw->eeprom.word_size * 2u))) {
+                return IXGBE_ERR_PARAM;
+        }
+
+        do {
+                u32 read_size, sector_offset;
+
+                /* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
+                 * Additionally, a read from the Shadow RAM may not cross over
+                 * a sector boundary. Conveniently, the sector size is also 4KB.
+                 */
+                sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
+                read_size = MIN_T(u32,
+                                  IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
+                                  inlen - bytes_read);
+
+                last_cmd = !(bytes_read + read_size < inlen);
+
+                /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
+                 * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
+                 * maximum size guarantees that it will fit within the 2 bytes.
+                 */
+                status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
+                                            offset, (u16)read_size,
+                                            data + bytes_read, last_cmd,
+                                            read_shadow_ram);
+                if (status)
+                        break;
+
+                bytes_read += read_size;
+                offset += read_size;
+        } while (!last_cmd);
+
+        *length = bytes_read;
+        return status;
+}
+
 /**
  * ixgbe_aci_get_internal_data - get internal FW/HW data
  * @hw: pointer to the hardware structure
@@ -2728,3 +2975,81 @@ s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw)
 
         return status;
 }
+
+/**
+ * ixgbe_read_ee_aci_E610 - Read EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+        s32 status;
+
+        if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+                status = ixgbe_init_eeprom_params(hw);
+                if (status)
+                        return status;
+        }
+
+        status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+        if (status)
+                return status;
+
+        status = ixgbe_read_sr_word_aci(hw, offset, data);
+        ixgbe_release_nvm(hw);
+
+        return status;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_E610 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+        u32 status;
+
+        if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+                status = ixgbe_init_eeprom_params(hw);
+                if (status)
+                        return status;
+        }
+
+        status = ixgbe_nvm_validate_checksum(hw);
+
+        if (status)
+                return status;
+
+        if (checksum_val) {
+                u16 tmp_checksum;
+                status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+                if (status)
+                        return status;
+
+                status = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
+                                                &tmp_checksum);
+                ixgbe_release_nvm(hw);
+
+                if (!status)
+                        *checksum_val = tmp_checksum;
+        }
+
+        return status;
+}
diff --git a/drivers/net/ixgbe/base/ixgbe_e610.h b/drivers/net/ixgbe/base/ixgbe_e610.h
index d689555826..78ff329107 100644
--- a/drivers/net/ixgbe/base/ixgbe_e610.h
+++ b/drivers/net/ixgbe/base/ixgbe_e610.h
@@ -56,6 +56,20 @@ s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
 s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
                          u16 mem_addr, u8 page, u8 page_bank_ctrl,
                          u8 *data, u8 length, bool write);
+s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+                      enum ixgbe_aci_res_access_type access);
+void ixgbe_release_nvm(struct ixgbe_hw *hw);
+
+s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+                       u16 length, void *data, bool last_command,
+                       bool read_shadow_ram);
+
+s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw);
+s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw);
+
+s32 ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+                        u8 *data, bool read_shadow_ram);
 s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
                                 u16 table_id, u32 start, void *buf,
                                 u16 buf_size, u16 *ret_buf_size,
@@ -90,5 +104,7 @@ s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
 s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw);
 s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on);
 s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw);
+s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val);
 
 #endif /* _IXGBE_E610_H_ */
-- 
2.43.0
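
For readers new to the E610 ACI flow, the sketch below (an illustration added
for this write-up, not part of the diff above) shows how the helpers introduced
by this patch are meant to fit together: the low-level ixgbe_read_flat_nvm()
path requires the caller to hold NVM ownership via ixgbe_acquire_nvm() and
ixgbe_release_nvm(), mirroring what ixgbe_read_ee_aci_E610() does internally,
while the high-level ixgbe_validate_eeprom_checksum_E610() wrapper manages
ownership itself. The function name example_e610_nvm_probe, the 32-byte buffer
and the error-handling policy are illustrative assumptions only.

/* Editorial illustration, not part of the patch. */
static s32 example_e610_nvm_probe(struct ixgbe_hw *hw, u16 *sw_checksum)
{
        u8 buf[32];                     /* illustrative scratch buffer */
        u32 len = sizeof(buf);
        s32 status;

        /* Low-level path: take NVM ownership for reading, pull a flat byte
         * range starting at offset 0, then release ownership again.
         * ixgbe_read_flat_nvm() splits the request across 4KB sectors and
         * caps each admin command at 4KB internally.
         */
        status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
        if (status)
                return status;

        status = ixgbe_read_flat_nvm(hw, 0, &len, buf, false);
        ixgbe_release_nvm(hw);
        if (status)
                return status;

        /* A real caller would now parse the data in buf (len holds the number
         * of bytes actually read).
         */

        /* High-level path: the E610 EEPROM helpers manage NVM ownership
         * themselves. This verifies the PFA checksum via ACI command 0x0706
         * and returns the stored software checksum word to the caller.
         */
        return ixgbe_validate_eeprom_checksum_E610(hw, sw_checksum);
}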