From: David Hunt <david.hunt@intel.com>
To: dev@dpdk.org
Cc: david.hunt@intel.com, anatoly.burakov@intel.com
Subject: [dpdk-dev] [PATCH v1] lib/power: fix buffer overrun coverity issues
Date: Tue, 9 Apr 2019 10:22:01 +0100 [thread overview]
Message-ID: <20190409092201.7886-1-david.hunt@intel.com> (raw)
Message-ID: <20190409092201.Jp9aEwCdkaGadV2JkMvVs9EVPs5hvbwXZukdJhrFvUs@z> (raw)
A previous change removed the limit of 64 cores by
moving away from 64-bit masks to char arrays. However,
this left a buffer overrun issue, where the max channels
was defined as 64, and max cores was defined as 256. These
should all be consistently set to RTE_MAX_LCORE.
The #defines being removed are CHANNEL_CMDS_MAX_CPUS,
CHANNEL_MGR_MAX_CHANNELS, POWER_MGR_MAX_CPUS, and
CHANNEL_CMDS_MAX_VM_CHANNELS, and are being replaced
with RTE_MAX_LCORE for consistency and simplicity.
Fixes: fd73630e95c1 ("examples/power: change 64-bit masks to arrays")
Coverity issue: 337672
Fixes: fd73630e95c1 ("examples/power: change 64-bit masks to arrays")
Coverity issue: 337673
Fixes: fd73630e95c1 ("examples/power: change 64-bit masks to arrays")
Coverity issue: 337678
Signed-off-by: David Hunt <david.hunt@intel.com>
---
examples/vm_power_manager/channel_manager.c | 74 ++++++++++-----------
examples/vm_power_manager/channel_manager.h | 10 +--
examples/vm_power_manager/power_manager.c | 18 ++---
examples/vm_power_manager/power_manager.h | 2 -
examples/vm_power_manager/vm_power_cli.c | 12 ++--
lib/librte_power/channel_commands.h | 3 -
lib/librte_power/power_kvm_vm.c | 10 +--
7 files changed, 59 insertions(+), 70 deletions(-)
diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index 0187f79ab..084681747 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -50,9 +50,9 @@ static bool global_hypervisor_available;
*/
struct virtual_machine_info {
char name[CHANNEL_MGR_MAX_NAME_LEN];
- uint16_t pcpu_map[CHANNEL_CMDS_MAX_CPUS];
- struct channel_info *channels[CHANNEL_CMDS_MAX_VM_CHANNELS];
- char channel_mask[POWER_MGR_MAX_CPUS];
+ uint16_t pcpu_map[RTE_MAX_LCORE];
+ struct channel_info *channels[RTE_MAX_LCORE];
+ char channel_mask[RTE_MAX_LCORE];
uint8_t num_channels;
enum vm_status status;
virDomainPtr domainPtr;
@@ -81,7 +81,7 @@ update_pcpus_mask(struct virtual_machine_info *vm_info)
unsigned i, j;
int n_vcpus;
- memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
+ memset(global_cpumaps, 0, RTE_MAX_LCORE*global_maplen);
if (!virDomainIsActive(vm_info->domainPtr)) {
n_vcpus = virDomainGetVcpuPinInfo(vm_info->domainPtr,
@@ -96,21 +96,21 @@ update_pcpus_mask(struct virtual_machine_info *vm_info)
}
memset(global_vircpuinfo, 0, sizeof(*global_vircpuinfo)*
- CHANNEL_CMDS_MAX_CPUS);
+ RTE_MAX_LCORE);
cpuinfo = global_vircpuinfo;
n_vcpus = virDomainGetVcpus(vm_info->domainPtr, cpuinfo,
- CHANNEL_CMDS_MAX_CPUS, global_cpumaps, global_maplen);
+ RTE_MAX_LCORE, global_cpumaps, global_maplen);
if (n_vcpus < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for "
"active VM '%s'\n", vm_info->name);
return -1;
}
update_pcpus:
- if (n_vcpus >= CHANNEL_CMDS_MAX_CPUS) {
+ if (n_vcpus >= RTE_MAX_LCORE) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Number of vCPUS(%u) is out of range "
- "0...%d\n", n_vcpus, CHANNEL_CMDS_MAX_CPUS-1);
+ "0...%d\n", n_vcpus, RTE_MAX_LCORE-1);
return -1;
}
if (n_vcpus != vm_info->info.nrVirtCpu) {
@@ -138,9 +138,9 @@ set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu)
int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
struct virtual_machine_info *vm_info;
- if (vcpu >= CHANNEL_CMDS_MAX_CPUS) {
+ if (vcpu >= RTE_MAX_LCORE) {
RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
- vcpu, CHANNEL_CMDS_MAX_CPUS-1);
+ vcpu, RTE_MAX_LCORE-1);
return -1;
}
@@ -162,7 +162,7 @@ set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu)
"vCPUs(%u)\n", vcpu, vm_info->info.nrVirtCpu);
return -1;
}
- memset(global_cpumaps, 0 , CHANNEL_CMDS_MAX_CPUS * global_maplen);
+ memset(global_cpumaps, 0, RTE_MAX_LCORE * global_maplen);
VIR_USE_CPU(global_cpumaps, pcpu);
@@ -436,10 +436,10 @@ add_all_channels(const char *vm_name)
dir->d_name);
continue;
}
- if (channel_num >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
+ if (channel_num >= RTE_MAX_LCORE) {
RTE_LOG(WARNING, CHANNEL_MANAGER, "Channel number(%u) is "
"greater than max allowable: %d, skipping '%s%s'\n",
- channel_num, CHANNEL_CMDS_MAX_VM_CHANNELS-1,
+ channel_num, RTE_MAX_LCORE-1,
CHANNEL_MGR_SOCKET_PATH, dir->d_name);
continue;
}
@@ -495,10 +495,10 @@ add_channels(const char *vm_name, unsigned *channel_list,
for (i = 0; i < len_channel_list; i++) {
- if (channel_list[i] >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
+ if (channel_list[i] >= RTE_MAX_LCORE) {
RTE_LOG(INFO, CHANNEL_MANAGER, "Channel(%u) is out of range "
"0...%d\n", channel_list[i],
- CHANNEL_CMDS_MAX_VM_CHANNELS-1);
+ RTE_MAX_LCORE-1);
continue;
}
if (channel_exists(vm_info, channel_list[i])) {
@@ -598,7 +598,7 @@ set_channel_status_all(const char *vm_name, enum channel_status status)
{
struct virtual_machine_info *vm_info;
unsigned i;
- char mask[POWER_MGR_MAX_CPUS];
+ char mask[RTE_MAX_LCORE];
int num_channels_changed = 0;
if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
@@ -614,8 +614,8 @@ set_channel_status_all(const char *vm_name, enum channel_status status)
}
rte_spinlock_lock(&(vm_info->config_spinlock));
- memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
- for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+ memcpy(mask, (char *)vm_info->channel_mask, RTE_MAX_LCORE);
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
if (mask[i] != 1)
continue;
vm_info->channels[i]->status = status;
@@ -672,7 +672,7 @@ get_all_vm(int *num_vm, int *num_vcpu)
if (!global_hypervisor_available)
return;
- memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
+ memset(global_cpumaps, 0, RTE_MAX_LCORE*global_maplen);
if (virNodeGetInfo(global_vir_conn_ptr, &node_info)) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
return;
@@ -725,7 +725,7 @@ get_info_vm(const char *vm_name, struct vm_info *info)
{
struct virtual_machine_info *vm_info;
unsigned i, channel_num = 0;
- char mask[POWER_MGR_MAX_CPUS];
+ char mask[RTE_MAX_LCORE];
vm_info = find_domain_by_name(vm_name);
if (vm_info == NULL) {
@@ -738,8 +738,8 @@ get_info_vm(const char *vm_name, struct vm_info *info)
rte_spinlock_lock(&(vm_info->config_spinlock));
- memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
- for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+ memcpy(mask, (char *)vm_info->channel_mask, RTE_MAX_LCORE);
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
if (mask[i] != 1)
continue;
info->channels[channel_num].channel_num = i;
@@ -803,17 +803,17 @@ add_vm(const char *vm_name)
rte_free(new_domain);
return -1;
}
- if (new_domain->info.nrVirtCpu > CHANNEL_CMDS_MAX_CPUS) {
+ if (new_domain->info.nrVirtCpu > RTE_MAX_LCORE) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Error the number of virtual CPUs(%u) is "
"greater than allowable(%d)\n", new_domain->info.nrVirtCpu,
- CHANNEL_CMDS_MAX_CPUS);
+ RTE_MAX_LCORE);
rte_free(new_domain);
return -1;
}
- for (i = 0; i < CHANNEL_CMDS_MAX_CPUS; i++) {
+ for (i = 0; i < RTE_MAX_LCORE; i++)
new_domain->pcpu_map[i] = 0;
- }
+
if (update_pcpus_mask(new_domain) < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting physical CPU pinning\n");
rte_free(new_domain);
@@ -821,7 +821,7 @@ add_vm(const char *vm_name)
}
strncpy(new_domain->name, vm_name, sizeof(new_domain->name));
new_domain->name[sizeof(new_domain->name) - 1] = '\0';
- memset(new_domain->channel_mask, 0, POWER_MGR_MAX_CPUS);
+ memset(new_domain->channel_mask, 0, RTE_MAX_LCORE);
new_domain->num_channels = 0;
if (!virDomainIsActive(dom_ptr))
@@ -896,17 +896,17 @@ channel_manager_init(const char *path __rte_unused)
} else {
global_hypervisor_available = 1;
- global_maplen = VIR_CPU_MAPLEN(CHANNEL_CMDS_MAX_CPUS);
+ global_maplen = VIR_CPU_MAPLEN(RTE_MAX_LCORE);
global_vircpuinfo = rte_zmalloc(NULL,
sizeof(*global_vircpuinfo) *
- CHANNEL_CMDS_MAX_CPUS, RTE_CACHE_LINE_SIZE);
+ RTE_MAX_LCORE, RTE_CACHE_LINE_SIZE);
if (global_vircpuinfo == NULL) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
goto error;
}
global_cpumaps = rte_zmalloc(NULL,
- CHANNEL_CMDS_MAX_CPUS * global_maplen,
+ RTE_MAX_LCORE * global_maplen,
RTE_CACHE_LINE_SIZE);
if (global_cpumaps == NULL)
goto error;
@@ -920,12 +920,12 @@ channel_manager_init(const char *path __rte_unused)
- if (global_n_host_cpus > CHANNEL_CMDS_MAX_CPUS) {
+ if (global_n_host_cpus > RTE_MAX_LCORE) {
RTE_LOG(WARNING, CHANNEL_MANAGER, "The number of host CPUs(%u) exceeds the "
"maximum of %u. No cores over %u should be used.\n",
- global_n_host_cpus, CHANNEL_CMDS_MAX_CPUS,
- CHANNEL_CMDS_MAX_CPUS - 1);
- global_n_host_cpus = CHANNEL_CMDS_MAX_CPUS;
+ global_n_host_cpus, RTE_MAX_LCORE,
+ RTE_MAX_LCORE - 1);
+ global_n_host_cpus = RTE_MAX_LCORE;
}
return 0;
@@ -939,15 +939,15 @@ void
channel_manager_exit(void)
{
unsigned i;
- char mask[POWER_MGR_MAX_CPUS];
+ char mask[RTE_MAX_LCORE];
struct virtual_machine_info *vm_info;
LIST_FOREACH(vm_info, &vm_list_head, vms_info) {
rte_spinlock_lock(&(vm_info->config_spinlock));
- memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
- for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+ memcpy(mask, (char *)vm_info->channel_mask, RTE_MAX_LCORE);
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
if (mask[i] != 1)
continue;
remove_channel_from_monitor(
diff --git a/examples/vm_power_manager/channel_manager.h b/examples/vm_power_manager/channel_manager.h
index c3cdce492..251d2163c 100644
--- a/examples/vm_power_manager/channel_manager.h
+++ b/examples/vm_power_manager/channel_manager.h
@@ -13,15 +13,9 @@ extern "C" {
#include <sys/un.h>
#include <rte_atomic.h>
-/* Maximum number of CPUs */
-#define CHANNEL_CMDS_MAX_CPUS 256
-
/* Maximum name length including '\0' terminator */
#define CHANNEL_MGR_MAX_NAME_LEN 64
-/* Maximum number of channels to each Virtual Machine */
-#define CHANNEL_MGR_MAX_CHANNELS 256
-
/* Hypervisor Path for libvirt(qemu/KVM) */
#define CHANNEL_MGR_DEFAULT_HV_PATH "qemu:///system"
@@ -78,9 +72,9 @@ struct channel_info {
struct vm_info {
char name[CHANNEL_MGR_MAX_NAME_LEN]; /**< VM name */
enum vm_status status; /**< libvirt status */
- uint16_t pcpu_map[CHANNEL_CMDS_MAX_CPUS]; /**< pCPU map to vCPU */
+ uint16_t pcpu_map[RTE_MAX_LCORE]; /**< pCPU map to vCPU */
unsigned num_vcpus; /**< number of vCPUS */
- struct channel_info channels[CHANNEL_MGR_MAX_CHANNELS]; /**< Array of channel_info */
+ struct channel_info channels[RTE_MAX_LCORE]; /**< channel_info array */
unsigned num_channels; /**< Number of channels */
};
diff --git a/examples/vm_power_manager/power_manager.c b/examples/vm_power_manager/power_manager.c
index aef832644..9553455be 100644
--- a/examples/vm_power_manager/power_manager.c
+++ b/examples/vm_power_manager/power_manager.c
@@ -39,7 +39,7 @@ struct freq_info {
unsigned num_freqs;
} __rte_cache_aligned;
-static struct freq_info global_core_freq_info[POWER_MGR_MAX_CPUS];
+static struct freq_info global_core_freq_info[RTE_MAX_LCORE];
struct core_info ci;
@@ -92,8 +92,8 @@ power_manager_init(void)
return -1;
}
- if (ci->core_count > POWER_MGR_MAX_CPUS)
- max_core_num = POWER_MGR_MAX_CPUS;
+ if (ci->core_count > RTE_MAX_LCORE)
+ max_core_num = RTE_MAX_LCORE;
else
max_core_num = ci->core_count;
@@ -132,9 +132,9 @@ power_manager_get_current_frequency(unsigned core_num)
{
uint32_t freq, index;
- if (core_num >= POWER_MGR_MAX_CPUS) {
+ if (core_num >= RTE_MAX_LCORE) {
RTE_LOG(ERR, POWER_MANAGER, "Core(%u) is out of range 0...%d\n",
- core_num, POWER_MGR_MAX_CPUS-1);
+ core_num, RTE_MAX_LCORE-1);
return -1;
}
if (!(ci.cd[core_num].global_enabled_cpus))
@@ -143,7 +143,7 @@ power_manager_get_current_frequency(unsigned core_num)
rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
index = rte_power_get_freq(core_num);
rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
- if (index >= POWER_MGR_MAX_CPUS)
+ if (index >= RTE_MAX_LCORE)
freq = 0;
else
freq = global_core_freq_info[core_num].freqs[index];
@@ -166,8 +166,8 @@ power_manager_exit(void)
return -1;
}
- if (ci->core_count > POWER_MGR_MAX_CPUS)
- max_core_num = POWER_MGR_MAX_CPUS;
+ if (ci->core_count > RTE_MAX_LCORE)
+ max_core_num = RTE_MAX_LCORE;
else
max_core_num = ci->core_count;
@@ -246,7 +246,7 @@ power_manager_scale_core_med(unsigned int core_num)
struct core_info *ci;
ci = get_core_info();
- if (core_num >= POWER_MGR_MAX_CPUS)
+ if (core_num >= RTE_MAX_LCORE)
return -1;
if (!(ci->cd[core_num].global_enabled_cpus))
return -1;
diff --git a/examples/vm_power_manager/power_manager.h b/examples/vm_power_manager/power_manager.h
index c3673844c..e81a60ae5 100644
--- a/examples/vm_power_manager/power_manager.h
+++ b/examples/vm_power_manager/power_manager.h
@@ -32,8 +32,6 @@ core_info_init(void);
#define RTE_LOGTYPE_POWER_MANAGER RTE_LOGTYPE_USER1
-/* Maximum number of CPUS to manage */
-#define POWER_MGR_MAX_CPUS 256
/**
* Initialize power management.
* Initializes resources and verifies the number of CPUs on the system.
diff --git a/examples/vm_power_manager/vm_power_cli.c b/examples/vm_power_manager/vm_power_cli.c
index 41e89ff20..89b000d92 100644
--- a/examples/vm_power_manager/vm_power_cli.c
+++ b/examples/vm_power_manager/vm_power_cli.c
@@ -225,7 +225,7 @@ cmd_channels_op_parsed(void *parsed_result, struct cmdline *cl,
{
unsigned num_channels = 0, channel_num, i;
int channels_added;
- unsigned channel_list[CHANNEL_CMDS_MAX_VM_CHANNELS];
+ unsigned int channel_list[RTE_MAX_LCORE];
char *token, *remaining, *tail_ptr;
struct cmd_channels_op_result *res = parsed_result;
@@ -249,10 +249,10 @@ cmd_channels_op_parsed(void *parsed_result, struct cmdline *cl,
if ((errno != 0) || tail_ptr == NULL || (*tail_ptr != '\0'))
break;
- if (channel_num == CHANNEL_CMDS_MAX_VM_CHANNELS) {
+ if (channel_num == RTE_MAX_LCORE) {
cmdline_printf(cl, "Channel number '%u' exceeds the maximum number "
"of allowable channels(%u) for VM '%s'\n", channel_num,
- CHANNEL_CMDS_MAX_VM_CHANNELS, res->vm_name);
+ RTE_MAX_LCORE, res->vm_name);
return;
}
channel_list[num_channels++] = channel_num;
@@ -306,7 +306,7 @@ cmd_channels_status_op_parsed(void *parsed_result, struct cmdline *cl,
{
unsigned num_channels = 0, channel_num;
int changed;
- unsigned channel_list[CHANNEL_CMDS_MAX_VM_CHANNELS];
+ unsigned int channel_list[RTE_MAX_LCORE];
char *token, *remaining, *tail_ptr;
struct cmd_channels_status_op_result *res = parsed_result;
enum channel_status status;
@@ -334,10 +334,10 @@ cmd_channels_status_op_parsed(void *parsed_result, struct cmdline *cl,
if ((errno != 0) || tail_ptr == NULL || (*tail_ptr != '\0'))
break;
- if (channel_num == CHANNEL_CMDS_MAX_VM_CHANNELS) {
+ if (channel_num == RTE_MAX_LCORE) {
cmdline_printf(cl, "%u exceeds the maximum number of allowable "
"channels(%u) for VM '%s'\n", channel_num,
- CHANNEL_CMDS_MAX_VM_CHANNELS, res->vm_name);
+ RTE_MAX_LCORE, res->vm_name);
return;
}
channel_list[num_channels++] = channel_num;
diff --git a/lib/librte_power/channel_commands.h b/lib/librte_power/channel_commands.h
index e7b93a797..eca8ff70c 100644
--- a/lib/librte_power/channel_commands.h
+++ b/lib/librte_power/channel_commands.h
@@ -12,9 +12,6 @@ extern "C" {
#include <stdint.h>
#include <stdbool.h>
-/* Maximum number of channels per VM */
-#define CHANNEL_CMDS_MAX_VM_CHANNELS 64
-
/* Valid Commands */
#define CPU_POWER 1
#define CPU_POWER_CONNECT 2
diff --git a/lib/librte_power/power_kvm_vm.c b/lib/librte_power/power_kvm_vm.c
index 20659b72f..277ebbeae 100644
--- a/lib/librte_power/power_kvm_vm.c
+++ b/lib/librte_power/power_kvm_vm.c
@@ -13,15 +13,15 @@
#define FD_PATH "/dev/virtio-ports/virtio.serial.port.poweragent"
-static struct channel_packet pkt[CHANNEL_CMDS_MAX_VM_CHANNELS];
+static struct channel_packet pkt[RTE_MAX_LCORE];
int
power_kvm_vm_init(unsigned int lcore_id)
{
- if (lcore_id >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
+ if (lcore_id >= RTE_MAX_LCORE) {
RTE_LOG(ERR, POWER, "Core(%u) is out of range 0...%d\n",
- lcore_id, CHANNEL_CMDS_MAX_VM_CHANNELS-1);
+ lcore_id, RTE_MAX_LCORE-1);
return -1;
}
pkt[lcore_id].command = CPU_POWER;
@@ -68,9 +68,9 @@ send_msg(unsigned int lcore_id, uint32_t scale_direction)
{
int ret;
- if (lcore_id >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
+ if (lcore_id >= RTE_MAX_LCORE) {
RTE_LOG(ERR, POWER, "Core(%u) is out of range 0...%d\n",
- lcore_id, CHANNEL_CMDS_MAX_VM_CHANNELS-1);
+ lcore_id, RTE_MAX_LCORE-1);
return -1;
}
pkt[lcore_id].unit = scale_direction;
--
2.17.1
next reply other threads:[~2019-04-09 9:22 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-04-09 9:22 David Hunt [this message]
2019-04-09 9:22 ` David Hunt
2019-04-09 11:18 ` Burakov, Anatoly
2019-04-09 11:18 ` Burakov, Anatoly
2019-04-09 14:40 ` Hunt, David
2019-04-09 14:40 ` Hunt, David
2019-04-10 7:52 ` Ferruh Yigit
2019-04-10 7:52 ` Ferruh Yigit
2019-04-10 10:55 ` Burakov, Anatoly
2019-04-10 10:55 ` Burakov, Anatoly
2019-04-10 14:17 ` Ferruh Yigit
2019-04-10 14:17 ` Ferruh Yigit
2019-04-22 20:55 ` Thomas Monjalon
2019-04-22 20:55 ` Thomas Monjalon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190409092201.7886-1-david.hunt@intel.com \
--to=david.hunt@intel.com \
--cc=anatoly.burakov@intel.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).