From: Andrew Rybchenko <arybchenko@solarflare.com>
To: <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH 24/30] net/sfc: move RxQ shared information to adapter shared
Date: Thu, 7 Feb 2019 12:17:47 +0000 [thread overview]
Message-ID: <1549541873-17403-25-git-send-email-arybchenko@solarflare.com> (raw)
In-Reply-To: <1549541873-17403-1-git-send-email-arybchenko@solarflare.com>
Prepare to make struct sfc_adapter a primary-process-private data structure.
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
---
drivers/net/sfc/sfc.h | 11 +++-
drivers/net/sfc/sfc_ethdev.c | 22 +++++---
drivers/net/sfc/sfc_flow.c | 6 +-
drivers/net/sfc/sfc_rx.c | 105 ++++++++++++++++++-----------------
4 files changed, 80 insertions(+), 64 deletions(-)
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index 890a04d0e..21568f959 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -176,6 +176,9 @@ struct sfc_rss {
/* Adapter private data shared by primary and secondary processes */
struct sfc_adapter_shared {
+ unsigned int rxq_count;
+ struct sfc_rxq_info *rxq_info;
+
struct rte_pci_addr pci_addr;
uint16_t port_id;
@@ -271,8 +274,6 @@ struct sfc_adapter {
bool mgmt_evq_running;
struct sfc_evq *mgmt_evq;
- unsigned int rxq_count;
- struct sfc_rxq_info *rxq_info;
struct sfc_rxq *rxq_ctrl;
unsigned int txq_count;
@@ -294,6 +295,12 @@ sfc_adapter_shared_by_eth_dev(struct rte_eth_dev *eth_dev)
return sa->priv.shared;
}
+static inline struct sfc_adapter_shared *
+sfc_sa2shared(struct sfc_adapter *sa)
+{
+ return sa->priv.shared;
+}
+
/*
* Add wrapper functions to acquire/release lock to be able to remove or
* change the lock in one place.
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index e30217cf1..a84690bb0 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -402,6 +402,7 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool)
{
+ struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_adapter *sa = dev->data->dev_private;
int rc;
@@ -415,7 +416,7 @@ sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
if (rc != 0)
goto fail_rx_qinit;
- dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].dp;
+ dev->data->rx_queues[rx_queue_id] = sas->rxq_info[rx_queue_id].dp;
sfc_adapter_unlock(sa);
@@ -1067,14 +1068,15 @@ static void
sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
struct rte_eth_rxq_info *qinfo)
{
+ struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_adapter *sa = dev->data->dev_private;
struct sfc_rxq_info *rxq_info;
sfc_adapter_lock(sa);
- SFC_ASSERT(rx_queue_id < sa->rxq_count);
+ SFC_ASSERT(rx_queue_id < sas->rxq_count);
- rxq_info = &sa->rxq_info[rx_queue_id];
+ rxq_info = &sas->rxq_info[rx_queue_id];
qinfo->mp = rxq_info->refill_mb_pool;
qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
@@ -1125,11 +1127,11 @@ static uint32_t
sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_rxq_info *rxq_info;
- SFC_ASSERT(rx_queue_id < sa->rxq_count);
- rxq_info = &sa->rxq_info[rx_queue_id];
+ SFC_ASSERT(rx_queue_id < sas->rxq_count);
+ rxq_info = &sas->rxq_info[rx_queue_id];
if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
return 0;
@@ -1185,6 +1187,7 @@ sfc_tx_descriptor_status(void *queue, uint16_t offset)
static int
sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
+ struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_adapter *sa = dev->data->dev_private;
int rc;
@@ -1196,14 +1199,14 @@ sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (sa->state != SFC_ADAPTER_STARTED)
goto fail_not_started;
- if (sa->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED)
+ if (sas->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED)
goto fail_not_setup;
rc = sfc_rx_qstart(sa, rx_queue_id);
if (rc != 0)
goto fail_rx_qstart;
- sa->rxq_info[rx_queue_id].deferred_started = B_TRUE;
+ sas->rxq_info[rx_queue_id].deferred_started = B_TRUE;
sfc_adapter_unlock(sa);
@@ -1220,6 +1223,7 @@ sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
static int
sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
+ struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
struct sfc_adapter *sa = dev->data->dev_private;
sfc_log_init(sa, "RxQ=%u", rx_queue_id);
@@ -1227,7 +1231,7 @@ sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
sfc_adapter_lock(sa);
sfc_rx_qstop(sa, rx_queue_id);
- sa->rxq_info[rx_queue_id].deferred_started = B_FALSE;
+ sas->rxq_info[rx_queue_id].deferred_started = B_FALSE;
sfc_adapter_unlock(sa);
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index e20c2e612..ab5f24f51 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -1241,7 +1241,7 @@ sfc_flow_parse_queue(struct sfc_adapter *sa,
{
struct sfc_rxq *rxq;
- if (queue->index >= sa->rxq_count)
+ if (queue->index >= sfc_sa2shared(sa)->rxq_count)
return -EINVAL;
rxq = &sa->rxq_ctrl[queue->index];
@@ -1268,7 +1268,7 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
if (action_rss->queue_num == 0)
return -EINVAL;
- rxq_sw_index = sa->rxq_count - 1;
+ rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
rxq = &sa->rxq_ctrl[rxq_sw_index];
rxq_hw_index_min = rxq->hw_index;
rxq_hw_index_max = 0;
@@ -1276,7 +1276,7 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
for (i = 0; i < action_rss->queue_num; ++i) {
rxq_sw_index = action_rss->queue[i];
- if (rxq_sw_index >= sa->rxq_count)
+ if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
return -EINVAL;
rxq = &sa->rxq_ctrl[rxq_sw_index];
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index eb4875fec..8af9d2148 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -381,15 +381,15 @@ sfc_rxq_info_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
struct rte_eth_dev *eth_dev;
- struct sfc_adapter *sa;
+ struct sfc_adapter_shared *sas;
SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
eth_dev = &rte_eth_devices[dpq->port_id];
- sa = eth_dev->data->dev_private;
+ sas = sfc_adapter_shared_by_eth_dev(eth_dev);
- SFC_ASSERT(dpq->queue_id < sa->rxq_count);
- return &sa->rxq_info[dpq->queue_id];
+ SFC_ASSERT(dpq->queue_id < sas->rxq_count);
+ return &sas->rxq_info[dpq->queue_id];
}
struct sfc_rxq *
@@ -404,7 +404,7 @@ sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
sa = eth_dev->data->dev_private;
- SFC_ASSERT(dpq->queue_id < sa->rxq_count);
+ SFC_ASSERT(dpq->queue_id < sfc_sa2shared(sa)->rxq_count);
return &sa->rxq_ctrl[dpq->queue_id];
}
@@ -567,7 +567,7 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
unsigned int wait_count;
int rc;
- rxq_info = &sa->rxq_info[sw_index];
+ rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);
rxq = &sa->rxq_ctrl[sw_index];
@@ -677,9 +677,9 @@ sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
sfc_log_init(sa, "sw_index=%u", sw_index);
- SFC_ASSERT(sw_index < sa->rxq_count);
+ SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
- rxq_info = &sa->rxq_info[sw_index];
+ rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);
rxq = &sa->rxq_ctrl[sw_index];
@@ -766,9 +766,9 @@ sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
sfc_log_init(sa, "sw_index=%u", sw_index);
- SFC_ASSERT(sw_index < sa->rxq_count);
+ SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
- rxq_info = &sa->rxq_info[sw_index];
+ rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
if (rxq_info->state == SFC_RXQ_INITIALIZED)
return;
@@ -1007,8 +1007,8 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
goto fail_bad_conf;
}
- SFC_ASSERT(sw_index < sa->rxq_count);
- rxq_info = &sa->rxq_info[sw_index];
+ SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
+ rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
rxq_info->entries = rxq_entries;
@@ -1098,10 +1098,10 @@ sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
struct sfc_rxq_info *rxq_info;
struct sfc_rxq *rxq;
- SFC_ASSERT(sw_index < sa->rxq_count);
+ SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
sa->eth_dev->data->rx_queues[sw_index] = NULL;
- rxq_info = &sa->rxq_info[sw_index];
+ rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);
@@ -1345,10 +1345,11 @@ sfc_rx_rss_config(struct sfc_adapter *sa)
int
sfc_rx_start(struct sfc_adapter *sa)
{
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
unsigned int sw_index;
int rc;
- sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
+ sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);
rc = efx_rx_init(sa->nic);
if (rc != 0)
@@ -1358,10 +1359,10 @@ sfc_rx_start(struct sfc_adapter *sa)
if (rc != 0)
goto fail_rss_config;
- for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
- if (sa->rxq_info[sw_index].state == SFC_RXQ_INITIALIZED &&
- (!sa->rxq_info[sw_index].deferred_start ||
- sa->rxq_info[sw_index].deferred_started)) {
+ for (sw_index = 0; sw_index < sas->rxq_count; ++sw_index) {
+ if (sas->rxq_info[sw_index].state == SFC_RXQ_INITIALIZED &&
+ (!sas->rxq_info[sw_index].deferred_start ||
+ sas->rxq_info[sw_index].deferred_started)) {
rc = sfc_rx_qstart(sa, sw_index);
if (rc != 0)
goto fail_rx_qstart;
@@ -1385,13 +1386,14 @@ sfc_rx_start(struct sfc_adapter *sa)
void
sfc_rx_stop(struct sfc_adapter *sa)
{
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
unsigned int sw_index;
- sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
+ sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);
- sw_index = sa->rxq_count;
+ sw_index = sas->rxq_count;
while (sw_index-- > 0) {
- if (sa->rxq_info[sw_index].state & SFC_RXQ_STARTED)
+ if (sas->rxq_info[sw_index].state & SFC_RXQ_STARTED)
sfc_rx_qstop(sa, sw_index);
}
@@ -1401,7 +1403,8 @@ sfc_rx_stop(struct sfc_adapter *sa)
static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
- struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+ struct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index];
unsigned int max_entries;
max_entries = EFX_RXQ_MAXNDESCS;
@@ -1463,17 +1466,18 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
static void
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
{
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
int sw_index;
- SFC_ASSERT(nb_rx_queues <= sa->rxq_count);
+ SFC_ASSERT(nb_rx_queues <= sas->rxq_count);
- sw_index = sa->rxq_count;
+ sw_index = sas->rxq_count;
while (--sw_index >= (int)nb_rx_queues) {
- if (sa->rxq_info[sw_index].state & SFC_RXQ_INITIALIZED)
+ if (sas->rxq_info[sw_index].state & SFC_RXQ_INITIALIZED)
sfc_rx_qfini(sa, sw_index);
}
- sa->rxq_count = nb_rx_queues;
+ sas->rxq_count = nb_rx_queues;
}
/**
@@ -1487,27 +1491,28 @@ sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
int
sfc_rx_configure(struct sfc_adapter *sa)
{
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
struct sfc_rss *rss = &sa->rss;
struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
int rc;
sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
- nb_rx_queues, sa->rxq_count);
+ nb_rx_queues, sas->rxq_count);
rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
if (rc != 0)
goto fail_check_mode;
- if (nb_rx_queues == sa->rxq_count)
+ if (nb_rx_queues == sas->rxq_count)
goto configure_rss;
- if (sa->rxq_info == NULL) {
+ if (sas->rxq_info == NULL) {
rc = ENOMEM;
- sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
- sizeof(sa->rxq_info[0]), 0,
- sa->socket_id);
- if (sa->rxq_info == NULL)
+ sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
+ sizeof(sas->rxq_info[0]), 0,
+ sa->socket_id);
+ if (sas->rxq_info == NULL)
goto fail_rxqs_alloc;
/*
@@ -1522,13 +1527,13 @@ sfc_rx_configure(struct sfc_adapter *sa)
struct sfc_rxq_info *new_rxq_info;
struct sfc_rxq *new_rxq_ctrl;
- if (nb_rx_queues < sa->rxq_count)
+ if (nb_rx_queues < sas->rxq_count)
sfc_rx_fini_queues(sa, nb_rx_queues);
rc = ENOMEM;
new_rxq_info =
- rte_realloc(sa->rxq_info,
- nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
+ rte_realloc(sas->rxq_info,
+ nb_rx_queues * sizeof(sas->rxq_info[0]), 0);
if (new_rxq_info == NULL && nb_rx_queues > 0)
goto fail_rxqs_realloc;
@@ -1538,29 +1543,29 @@ sfc_rx_configure(struct sfc_adapter *sa)
if (new_rxq_ctrl == NULL && nb_rx_queues > 0)
goto fail_rxqs_ctrl_realloc;
- sa->rxq_info = new_rxq_info;
+ sas->rxq_info = new_rxq_info;
sa->rxq_ctrl = new_rxq_ctrl;
- if (nb_rx_queues > sa->rxq_count) {
- memset(&sa->rxq_info[sa->rxq_count], 0,
- (nb_rx_queues - sa->rxq_count) *
- sizeof(sa->rxq_info[0]));
- memset(&sa->rxq_ctrl[sa->rxq_count], 0,
- (nb_rx_queues - sa->rxq_count) *
+ if (nb_rx_queues > sas->rxq_count) {
+ memset(&sas->rxq_info[sas->rxq_count], 0,
+ (nb_rx_queues - sas->rxq_count) *
+ sizeof(sas->rxq_info[0]));
+ memset(&sa->rxq_ctrl[sas->rxq_count], 0,
+ (nb_rx_queues - sas->rxq_count) *
sizeof(sa->rxq_ctrl[0]));
}
}
- while (sa->rxq_count < nb_rx_queues) {
- rc = sfc_rx_qinit_info(sa, sa->rxq_count);
+ while (sas->rxq_count < nb_rx_queues) {
+ rc = sfc_rx_qinit_info(sa, sas->rxq_count);
if (rc != 0)
goto fail_rx_qinit_info;
- sa->rxq_count++;
+ sas->rxq_count++;
}
configure_rss:
rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
- MIN(sa->rxq_count, EFX_MAXRSS) : 0;
+ MIN(sas->rxq_count, EFX_MAXRSS) : 0;
if (rss->channels > 0) {
struct rte_eth_rss_conf *adv_conf_rss;
@@ -1607,6 +1612,6 @@ sfc_rx_close(struct sfc_adapter *sa)
free(sa->rxq_ctrl);
sa->rxq_ctrl = NULL;
- rte_free(sa->rxq_info);
- sa->rxq_info = NULL;
+ rte_free(sfc_sa2shared(sa)->rxq_info);
+ sfc_sa2shared(sa)->rxq_info = NULL;
}
--
2.17.1
next prev parent reply other threads:[~2019-02-07 12:18 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-02-07 12:17 [dpdk-dev] [PATCH 00/30] net/sfc: improve multi-process support Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 01/30] net/sfc: log port ID as 16-bit unsigned integer on panic Andrew Rybchenko
2019-02-08 10:13 ` Ferruh Yigit
2019-02-08 10:31 ` Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 02/30] net/sfc: remove control path logging from Rx queue count Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 03/30] net/sfc: fix logging from secondary process Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 04/30] net/sfc: avoid usage of RxQ control structure in info get Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 05/30] net/sfc: avoid usage of TxQ " Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 06/30] net/sfc: remove wrappers around Rx descriptor count and done Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 07/30] net/sfc: make it simpler to change datapath ops location Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 08/30] net/sfc: move datapath ops pointers to process private data Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 09/30] net/sfc: move main log type " Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 10/30] net/sfc: move RxQ state to multi-process shared location Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 11/30] net/sfc: move datapath RxQ handle to shared RxQ info Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 12/30] net/sfc: support Rx descriptor status in secondary process Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 13/30] net/sfc: move TxQ state to multi-process shared location Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 14/30] net/sfc: move datapath TxQ handle to shared TxQ info Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 15/30] net/sfc: support Tx descriptor status in secondary process Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 16/30] net/sfc: support RSS RETA and hash config get in secondary Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 17/30] net/sfc: remove unnecessary functions to get RxQ index Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 18/30] net/sfc: remove unnecessary functions to get TxQ index Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 19/30] net/sfc: remove RxQ control from shared RxQ info Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 20/30] net/sfc: remove TxQ control from shared TxQ info Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 21/30] net/sfc: start to factor out multi-process shared data Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 22/30] net/sfc: move Rx/Tx datapath names to shared state Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 23/30] net/sfc: make main logging macro reusable in secondary Andrew Rybchenko
2019-02-07 12:17 ` Andrew Rybchenko [this message]
2019-02-07 12:17 ` [dpdk-dev] [PATCH 25/30] net/sfc: move TxQ shared information to adapter shared Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 26/30] net/sfc: move RSS config " Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 27/30] net/sfc: move isolated flag in " Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 28/30] net/sfc: remove adapter locks from secondary process ops Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 29/30] net/sfc: separate adapter primary process and shared data Andrew Rybchenko
2019-02-07 12:17 ` [dpdk-dev] [PATCH 30/30] net/sfc: support Rx packet types get in secondary process Andrew Rybchenko
2019-02-07 15:08 ` [dpdk-dev] [PATCH 00/30] net/sfc: improve multi-process support Ferruh Yigit
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1549541873-17403-25-git-send-email-arybchenko@solarflare.com \
--to=arybchenko@solarflare.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).