From: Tejasree Kondoj <ktejasree@marvell.com>
To: Akhil Goyal <gakhil@marvell.com>
Cc: Vidya Sagar Velumuri <vvelumuri@marvell.com>,
Anoob Joseph <anoobj@marvell.com>,
Aakash Sasidharan <asasidharan@marvell.com>,
"Nithinsen Kaithakadan" <nkaithakadan@marvell.com>,
Rupesh Chiluka <rchiluka@marvell.com>, <dev@dpdk.org>
Subject: [PATCH 24/40] crypto/cnxk: add tls read session creation
Date: Fri, 23 May 2025 19:20:55 +0530 [thread overview]
Message-ID: <20250523135111.2178408-25-ktejasree@marvell.com> (raw)
In-Reply-To: <20250523135111.2178408-1-ktejasree@marvell.com>
From: Vidya Sagar Velumuri <vvelumuri@marvell.com>
Add TLS read session creation support for cn20k.
Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
drivers/crypto/cnxk/cn20k_tls.c | 329 +++++++++++++++++++++++++++++++-
1 file changed, 327 insertions(+), 2 deletions(-)
diff --git a/drivers/crypto/cnxk/cn20k_tls.c b/drivers/crypto/cnxk/cn20k_tls.c
index 40fe48ae69..4a68edf731 100644
--- a/drivers/crypto/cnxk/cn20k_tls.c
+++ b/drivers/crypto/cnxk/cn20k_tls.c
@@ -461,6 +461,330 @@ cn20k_tls_read_sa_create(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf,
return ret;
}
+static int
+tls_write_rlens_get(struct rte_security_tls_record_xform *tls_xfrm,
+ struct rte_crypto_sym_xform *crypto_xfrm)
+{
+ enum rte_crypto_cipher_algorithm c_algo = RTE_CRYPTO_CIPHER_NULL;
+ enum rte_crypto_auth_algorithm a_algo = RTE_CRYPTO_AUTH_NULL;
+ uint8_t roundup_byte, tls_hdr_len;
+ uint8_t mac_len, iv_len;
+
+ switch (tls_xfrm->ver) {
+ case RTE_SECURITY_VERSION_TLS_1_2:
+ case RTE_SECURITY_VERSION_TLS_1_3:
+ tls_hdr_len = 5;
+ break;
+ case RTE_SECURITY_VERSION_DTLS_1_2:
+ tls_hdr_len = 13;
+ break;
+ default:
+ tls_hdr_len = 0;
+ break;
+ }
+
+ /* Get Cipher and Auth algo */
+ if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ return tls_hdr_len + ROC_CPT_AES_GCM_IV_LEN + ROC_CPT_AES_GCM_MAC_LEN;
+
+ if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ c_algo = crypto_xfrm->cipher.algo;
+ if (crypto_xfrm->next)
+ a_algo = crypto_xfrm->next->auth.algo;
+ } else {
+ a_algo = crypto_xfrm->auth.algo;
+ if (crypto_xfrm->next)
+ c_algo = crypto_xfrm->next->cipher.algo;
+ }
+
+ switch (c_algo) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ roundup_byte = 4;
+ iv_len = 0;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ roundup_byte = ROC_CPT_DES_BLOCK_LENGTH;
+ iv_len = ROC_CPT_DES_IV_LEN;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ roundup_byte = ROC_CPT_AES_BLOCK_LENGTH;
+ iv_len = ROC_CPT_AES_CBC_IV_LEN;
+ break;
+ default:
+ roundup_byte = 0;
+ iv_len = 0;
+ break;
+ }
+
+ switch (a_algo) {
+ case RTE_CRYPTO_AUTH_NULL:
+ mac_len = 0;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ mac_len = 16;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ mac_len = 20;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ mac_len = 32;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ mac_len = 32;
+ break;
+ default:
+ mac_len = 0;
+ break;
+ }
+
+ return tls_hdr_len + iv_len + mac_len + roundup_byte;
+}
+
+static int
+tls_write_sa_fill(struct roc_ie_ow_tls_write_sa *write_sa,
+ struct rte_security_tls_record_xform *tls_xfrm,
+ struct rte_crypto_sym_xform *crypto_xfrm)
+{
+ enum rte_security_tls_version tls_ver = tls_xfrm->ver;
+ struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm;
+ const uint8_t *key = NULL;
+ uint8_t *cipher_key;
+ uint64_t *tmp_key;
+ int i, length = 0;
+ size_t offset;
+
+ if (tls_ver == RTE_SECURITY_VERSION_TLS_1_2) {
+ write_sa->w2.s.version_select = ROC_IE_OW_TLS_VERSION_TLS_12;
+ write_sa->tls_12.seq_num = tls_xfrm->tls_1_2.seq_no - 1;
+ } else if (tls_ver == RTE_SECURITY_VERSION_DTLS_1_2) {
+ write_sa->w2.s.version_select = ROC_IE_OW_TLS_VERSION_DTLS_12;
+ write_sa->tls_12.seq_num = ((uint64_t)tls_xfrm->dtls_1_2.epoch << 48) |
+ (tls_xfrm->dtls_1_2.seq_no & 0x0000ffffffffffff);
+ write_sa->tls_12.seq_num -= 1;
+ } else if (tls_ver == RTE_SECURITY_VERSION_TLS_1_3) {
+ write_sa->w2.s.version_select = ROC_IE_OW_TLS_VERSION_TLS_13;
+ write_sa->tls_13.seq_num = tls_xfrm->tls_1_3.seq_no - 1;
+ }
+
+ cipher_key = write_sa->cipher_key;
+
+ /* Set encryption algorithm */
+ if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ length = crypto_xfrm->aead.key.length;
+ if (crypto_xfrm->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ write_sa->w2.s.cipher_select = ROC_IE_OW_TLS_CIPHER_AES_GCM;
+ if (length == 16)
+ write_sa->w2.s.aes_key_len = ROC_IE_OW_TLS_AES_KEY_LEN_128;
+ else
+ write_sa->w2.s.aes_key_len = ROC_IE_OW_TLS_AES_KEY_LEN_256;
+ }
+ if (crypto_xfrm->aead.algo == RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
+ write_sa->w2.s.cipher_select = ROC_IE_OW_TLS_CIPHER_CHACHA_POLY;
+ write_sa->w2.s.aes_key_len = ROC_IE_OW_TLS_AES_KEY_LEN_256;
+ }
+
+ key = crypto_xfrm->aead.key.data;
+ memcpy(cipher_key, key, length);
+
+ if (tls_ver == RTE_SECURITY_VERSION_TLS_1_2)
+ memcpy(((uint8_t *)cipher_key + 32), &tls_xfrm->tls_1_2.imp_nonce, 4);
+ else if (tls_ver == RTE_SECURITY_VERSION_DTLS_1_2)
+ memcpy(((uint8_t *)cipher_key + 32), &tls_xfrm->dtls_1_2.imp_nonce, 4);
+ else if (tls_ver == RTE_SECURITY_VERSION_TLS_1_3)
+ memcpy(((uint8_t *)cipher_key + 32), &tls_xfrm->tls_1_3.imp_nonce, 12);
+
+ goto key_swap;
+ }
+
+ if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xfrm = crypto_xfrm;
+ cipher_xfrm = crypto_xfrm->next;
+ } else {
+ cipher_xfrm = crypto_xfrm;
+ auth_xfrm = crypto_xfrm->next;
+ }
+
+ if (cipher_xfrm != NULL) {
+ if (cipher_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC) {
+ write_sa->w2.s.cipher_select = ROC_IE_OW_TLS_CIPHER_3DES;
+ length = cipher_xfrm->cipher.key.length;
+ } else if (cipher_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
+ write_sa->w2.s.cipher_select = ROC_IE_OW_TLS_CIPHER_AES_CBC;
+ length = cipher_xfrm->cipher.key.length;
+ if (length == 16)
+ write_sa->w2.s.aes_key_len = ROC_IE_OW_TLS_AES_KEY_LEN_128;
+ else if (length == 32)
+ write_sa->w2.s.aes_key_len = ROC_IE_OW_TLS_AES_KEY_LEN_256;
+ else
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ key = cipher_xfrm->cipher.key.data;
+ if (key != NULL && length != 0) {
+ /* Copy encryption key */
+ memcpy(cipher_key, key, length);
+ }
+ }
+
+ if (auth_xfrm != NULL) {
+ if (auth_xfrm->auth.algo == RTE_CRYPTO_AUTH_MD5_HMAC)
+ write_sa->w2.s.mac_select = ROC_IE_OW_TLS_MAC_MD5;
+ else if (auth_xfrm->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
+ write_sa->w2.s.mac_select = ROC_IE_OW_TLS_MAC_SHA1;
+ else if (auth_xfrm->auth.algo == RTE_CRYPTO_AUTH_SHA256_HMAC)
+ write_sa->w2.s.mac_select = ROC_IE_OW_TLS_MAC_SHA2_256;
+ else if (auth_xfrm->auth.algo == RTE_CRYPTO_AUTH_SHA384_HMAC)
+ write_sa->w2.s.mac_select = ROC_IE_OW_TLS_MAC_SHA2_384;
+ else
+ return -EINVAL;
+
+ roc_se_hmac_opad_ipad_gen(write_sa->w2.s.mac_select, auth_xfrm->auth.key.data,
+ auth_xfrm->auth.key.length, write_sa->tls_12.opad_ipad,
+ ROC_SE_TLS);
+ }
+
+ tmp_key = (uint64_t *)write_sa->tls_12.opad_ipad;
+ for (i = 0; i < (int)(ROC_CTX_MAX_OPAD_IPAD_LEN / sizeof(uint64_t)); i++)
+ tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);
+
+key_swap:
+ tmp_key = (uint64_t *)cipher_key;
+ for (i = 0; i < (int)(ROC_IE_OW_TLS_CTX_MAX_KEY_IV_LEN / sizeof(uint64_t)); i++)
+ tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);
+
+ write_sa->w0.s.ctx_hdr_size = ROC_IE_OW_TLS_CTX_HDR_SIZE;
+ /* Entire context size in 128B units */
+ write_sa->w0.s.ctx_size =
+ (PLT_ALIGN_CEIL(sizeof(struct roc_ie_ow_tls_write_sa), ROC_CTX_UNIT_128B) /
+ ROC_CTX_UNIT_128B) -
+ 1;
+ offset = offsetof(struct roc_ie_ow_tls_write_sa, tls_12.w26_rsvd7);
+
+ if (tls_ver == RTE_SECURITY_VERSION_TLS_1_3) {
+ offset = offsetof(struct roc_ie_ow_tls_write_sa, tls_13.w10_rsvd7);
+ write_sa->w0.s.ctx_size -= 1;
+ }
+
+ /* Word offset for HW managed CTX field */
+ write_sa->w0.s.hw_ctx_off = offset / 8;
+ write_sa->w0.s.ctx_push_size = write_sa->w0.s.hw_ctx_off;
+
+ write_sa->w0.s.aop_valid = 1;
+
+ write_sa->w2.s.iv_at_cptr = ROC_IE_OW_TLS_IV_SRC_DEFAULT;
+
+ if (write_sa->w2.s.version_select != ROC_IE_OW_TLS_VERSION_TLS_13) {
+#ifdef LA_IPSEC_DEBUG
+ if (tls_xfrm->options.iv_gen_disable == 1)
+ write_sa->w2.s.iv_at_cptr = ROC_IE_OW_TLS_IV_SRC_FROM_SA;
+#else
+ if (tls_xfrm->options.iv_gen_disable) {
+ plt_err("Application provided IV is not supported");
+ return -ENOTSUP;
+ }
+#endif
+ }
+
+ rte_wmb();
+
+ return 0;
+}
+
+static int
+cn20k_tls_write_sa_create(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf,
+ struct rte_security_tls_record_xform *tls_xfrm,
+ struct rte_crypto_sym_xform *crypto_xfrm,
+ struct cn20k_sec_session *sec_sess)
+{
+ struct roc_ie_ow_tls_write_sa *sa_dptr;
+ uint8_t tls_ver = tls_xfrm->ver;
+ struct cn20k_tls_record *tls;
+ union cpt_inst_w4 inst_w4;
+ void *write_sa;
+ int ret = 0;
+
+ tls = &sec_sess->tls_rec;
+ write_sa = &tls->write_sa;
+
+ /* Allocate memory to be used as dptr for CPT ucode WRITE_SA op */
+ sa_dptr = plt_zmalloc(sizeof(struct roc_ie_ow_tls_write_sa), 8);
+ if (sa_dptr == NULL) {
+ plt_err("Couldn't allocate memory for SA dptr");
+ return -ENOMEM;
+ }
+
+ /* Translate security parameters to SA */
+ ret = tls_write_sa_fill(sa_dptr, tls_xfrm, crypto_xfrm);
+ if (ret) {
+ plt_err("Could not fill write session parameters");
+ goto sa_dptr_free;
+ }
+
+ if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ sec_sess->iv_offset = crypto_xfrm->aead.iv.offset;
+ sec_sess->iv_length = crypto_xfrm->aead.iv.length;
+ } else if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ sec_sess->iv_offset = crypto_xfrm->cipher.iv.offset;
+ sec_sess->iv_length = crypto_xfrm->cipher.iv.length;
+ } else {
+ sec_sess->iv_offset = crypto_xfrm->next->cipher.iv.offset;
+ sec_sess->iv_length = crypto_xfrm->next->cipher.iv.length;
+ }
+
+ sec_sess->tls_opt.is_write = 1;
+ sec_sess->tls_opt.pad_shift = 0;
+ sec_sess->tls_opt.tls_ver = tls_ver;
+ sec_sess->tls_opt.enable_padding = tls_xfrm->options.extra_padding_enable;
+ sec_sess->max_extended_len = tls_write_rlens_get(tls_xfrm, crypto_xfrm);
+ sec_sess->proto = RTE_SECURITY_PROTOCOL_TLS_RECORD;
+
+ /* pre-populate CPT INST word 4 */
+ inst_w4.u64 = 0;
+ if ((tls_ver == RTE_SECURITY_VERSION_TLS_1_2) ||
+ (tls_ver == RTE_SECURITY_VERSION_DTLS_1_2)) {
+ inst_w4.s.opcode_major = ROC_IE_OW_TLS_MAJOR_OP_RECORD_ENC | ROC_IE_OW_INPLACE_BIT;
+ if (sa_dptr->w2.s.cipher_select == ROC_IE_OW_TLS_CIPHER_3DES)
+ sec_sess->tls_opt.pad_shift = 3;
+ else
+ sec_sess->tls_opt.pad_shift = 4;
+ } else if (tls_ver == RTE_SECURITY_VERSION_TLS_1_3) {
+ inst_w4.s.opcode_major =
+ ROC_IE_OW_TLS13_MAJOR_OP_RECORD_ENC | ROC_IE_OW_INPLACE_BIT;
+ }
+ sec_sess->inst.w4 = inst_w4.u64;
+ sec_sess->inst.w7 = cnxk_cpt_sec_inst_w7_get(roc_cpt, write_sa);
+
+ memset(write_sa, 0, sizeof(struct roc_ie_ow_tls_write_sa));
+
+ /* Copy word0 from sa_dptr to populate ctx_push_sz ctx_size fields */
+ memcpy(write_sa, sa_dptr, 8);
+
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
+
+ /* Write session using microcode opcode */
+ ret = roc_cpt_ctx_write(lf, sa_dptr, write_sa, sizeof(struct roc_ie_ow_tls_write_sa));
+ if (ret) {
+ plt_err("Could not write tls write session to hardware");
+ goto sa_dptr_free;
+ }
+
+ /* Trigger CTX flush so that data is written back to DRAM */
+ ret = roc_cpt_lf_ctx_flush(lf, write_sa, false);
+ if (ret == -EFAULT) {
+ plt_err("Could not flush TLS write session to hardware");
+ goto sa_dptr_free;
+ }
+
+ rte_atomic_thread_fence(rte_memory_order_seq_cst);
+
+sa_dptr_free:
+ plt_free(sa_dptr);
+
+ return ret;
+}
+
int
cn20k_tls_record_session_update(struct cnxk_cpt_vf *vf, struct cnxk_cpt_qp *qp,
struct cn20k_sec_session *sess,
@@ -492,8 +816,9 @@ cn20k_tls_record_session_create(struct cnxk_cpt_vf *vf, struct cnxk_cpt_qp *qp,
if (tls_xfrm->type == RTE_SECURITY_TLS_SESS_TYPE_READ)
return cn20k_tls_read_sa_create(roc_cpt, &qp->lf, tls_xfrm, crypto_xfrm,
(struct cn20k_sec_session *)sess);
-
- return -ENOTSUP;
+ else
+ return cn20k_tls_write_sa_create(roc_cpt, &qp->lf, tls_xfrm, crypto_xfrm,
+ (struct cn20k_sec_session *)sess);
}
int
--
2.25.1
next prev parent reply other threads:[~2025-05-23 13:54 UTC|newest]
Thread overview: 41+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-05-23 13:50 [PATCH 00/40] fixes and new features to cnxk crypto PMD Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 01/40] crypto/cnxk: update the sg list population Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 02/40] crypto/cnxk: add lookaside IPsec CPT LF stats Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 03/40] crypto/cnxk: fix qp stats PMD API Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 04/40] crypto/cnxk: fail Rx inject configure if not supported Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 05/40] crypto/cnxk: add check for max supported gather entries Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 06/40] crypto/cnxk: enable IV from application support Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 07/40] crypto/cnxk: add probe for cn20k crypto device Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 08/40] crypto/cnxk: add ops skeleton for cn20k Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 09/40] crypto/cnxk: add dev info get Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 10/40] crypto/cnxk: add skeletion for enq deq functions Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 11/40] crypto/cnxk: add lmtst routines for cn20k Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 12/40] crypto/cnxk: add enqueue function support Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 13/40] crypto/cnxk: add cryptodev dequeue support for cn20k Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 14/40] crypto/cnxk: move debug dumps to common Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 15/40] crypto/cnxk: add rte security skeletion for cn20k Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 16/40] crypto/cnxk: add security session creation Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 17/40] crypto/cnxk: add security session destroy Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 18/40] crypto/cnxk: move code to common Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 19/40] crypto/cnxk: add rte sec session update Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 20/40] crypto/cnxk: add rte security datapath handling Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 21/40] crypto/cnxk: add Rx inject in security lookaside Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 22/40] crypto/cnxk: add skeleton for tls Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 23/40] crypto/cnxk: add tls write session creation Tejasree Kondoj
2025-05-23 13:50 ` Tejasree Kondoj [this message]
2025-05-23 13:50 ` [PATCH 25/40] crypto/cnxk: add tls session destroy Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 26/40] crypto/cnxk: add enq and dequeue support for TLS Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 27/40] crypto/cnxk: tls post process Tejasree Kondoj
2025-05-23 13:50 ` [PATCH 28/40] crypto/cnxk: add tls session update Tejasree Kondoj
2025-05-23 13:51 ` [PATCH 29/40] crypto/cnxk: include required headers Tejasree Kondoj
2025-05-23 13:51 ` [PATCH 30/40] crypto/cnxk: support raw API for cn20k Tejasree Kondoj
2025-05-23 13:51 ` [PATCH 31/40] crypto/cnxk: add model check " Tejasree Kondoj
2025-05-23 13:51 ` [PATCH 32/40] common/cnxk: fix salt handling with aes-ctr Tejasree Kondoj
2025-05-23 13:51 ` [PATCH 33/40] common/cnxk: set correct salt value for ctr algos Tejasree Kondoj
2025-05-23 13:51 ` [PATCH 34/40] crypto/cnxk: extend check for max supported gather entries Tejasree Kondoj
2025-05-23 13:51 ` [PATCH 35/40] crypto/cnxk: add struct variable for custom metadata Tejasree Kondoj
2025-05-23 13:51 ` [PATCH 36/40] crypto/cnxk: add asym sessionless handling Tejasree Kondoj
2025-05-23 13:51 ` [PATCH 37/40] crypto/cnxk: add support for sessionless asym Tejasree Kondoj
2025-05-23 13:51 ` [PATCH 38/40] doc: update CN20K CPT documentation Tejasree Kondoj
2025-05-23 13:51 ` [PATCH 39/40] common/cnxk: update qsize in CPT iq enable Tejasree Kondoj
2025-05-23 13:51 ` [PATCH 40/40] crypto/cnxk: copy 8B iv into sess in aes ctr Tejasree Kondoj
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250523135111.2178408-25-ktejasree@marvell.com \
--to=ktejasree@marvell.com \
--cc=anoobj@marvell.com \
--cc=asasidharan@marvell.com \
--cc=dev@dpdk.org \
--cc=gakhil@marvell.com \
--cc=nkaithakadan@marvell.com \
--cc=rchiluka@marvell.com \
--cc=vvelumuri@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).