From: Xiaojun Liu <xiaojun.liu@silicom.co.il>
To: xiao.w.wang@intel.com, qi.z.zhang@intel.com,
ngai-mint.kwan@intel.com, jacob.e.keller@intel.com
Cc: dev@dpdk.org, Xiaojun Liu <xiaojun.liu@silicom.co.il>
Subject: [dpdk-dev] [PATCH v1 5/5] net/fm10k: add switch management support
Date: Fri, 28 Feb 2020 16:38:22 +0800
Message-ID: <1582879102-17977-6-git-send-email-xiaojun.liu@silicom.co.il>
In-Reply-To: <1582879102-17977-1-git-send-email-xiaojun.liu@silicom.co.il>

Split dev init into two parts. The first part only registers the
port with the switch management code; the second part, an init hook,
is called after all the PFs are registered and the switch is
initialized, and finishes dev init. Also add switch interrupt
support.
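
The resulting two-phase flow, condensed from the diff below (setup
and error handling elided; a sketch rather than the full
implementation):

    /* Phase 1 (probe): eth_fm10k_dev_init() no longer finishes port
     * bring-up itself; it registers the port with the switch code and
     * passes the phase-2 hook as a callback.
     */
    static int
    eth_fm10k_dev_init(struct rte_eth_dev *dev)
    {
        struct fm10k_hw *hw =
            FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        bool master = FM10K_READ_REG(hw, FM10K_CTRL) &
            FM10K_CTRL_BAR4_ALLOWED;

        /* ... PCI, mailbox and parameter setup elided ... */
        if (hw->mac.type == fm10k_mac_pf)
            return fm10k_switch_dpdk_port_start(hw, dev, 1,
                master, eth_fm10k_dev_init_hook);
        return fm10k_switch_dpdk_port_start(hw, dev, 0,
            false, eth_fm10k_dev_init_hook);
    }

    /* Phase 2: the switch code calls eth_fm10k_dev_init_hook() once
     * all PFs are registered and the switch is ready; the hook enables
     * the lport, waits for the default VID and installs the default
     * MAC filter.
     */
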
Add fm10k_mirror_rule_set()/fm10k_mirror_rule_reset() to support
mirror operations, and fm10k_dev_filter_ctrl() to support flow
operations.
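
For illustration, a hypothetical application call that reaches the
new mirror ops through the generic ethdev API of this release (the
values are made up; the sw_id/rule_id argument is only used for
logging by this PMD):

    struct rte_eth_mirror_conf conf;

    memset(&conf, 0, sizeof(conf));
    conf.dst_pool = 2;            /* mirror destination port */
    conf.vlan.vlan_id[0] = 100;   /* VLAN attached to mirrored frames */
    rte_eth_mirror_rule_set(port_id, &conf, 0, 1);   /* install */
    rte_eth_mirror_rule_reset(port_id, 0);           /* remove */
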
Add a DPDK port to PF mapping, so a DPDK port can map to a specific
PF, and one DPDK port can map to two PFs to reach a total of 100G
throughput.
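
The queue resolution itself is done by
fm10k_switch_dpdk_hw_queue_map() in the diff; below is a minimal
sketch of the idea, assuming software queues are split evenly between
the two mapped PFs (consistent with the nb_rx_queues /= 2 adjustment
in the diff, though the real policy belongs to the switch code):

    /* Hypothetical even split: software queue i of a port backed by
     * two PFs lands on PF i % 2, hardware queue i / 2.
     */
    static void
    map_queue_sketch(struct fm10k_hw *pfs[2], uint16_t sw_queue,
                     struct fm10k_hw **hw, uint16_t *hw_queue)
    {
        *hw = pfs[sw_queue % 2];
        *hw_queue = sw_queue / 2;
    }
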
To enable switch management, add CONFIG_RTE_FM10K_MANAGEMENT=y to
config/common_linux when building.
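
That is (with the make-based build; the option becomes the
RTE_FM10K_MANAGEMENT macro that guards the new code paths in this
patch):

    # config/common_linux
    CONFIG_RTE_FM10K_MANAGEMENT=y
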
Signed-off-by: Xiaojun Liu <xiaojun.liu@silicom.co.il>
---
drivers/net/fm10k/base/fm10k_type.h | 1 +
drivers/net/fm10k/fm10k_ethdev.c | 562 +++++++++++++++++++++++++++++---
drivers/net/fm10k/switch/fm10k_switch.c | 20 +-
drivers/net/fm10k/switch/fm10k_switch.h | 1 +
4 files changed, 541 insertions(+), 43 deletions(-)
diff --git a/drivers/net/fm10k/base/fm10k_type.h b/drivers/net/fm10k/base/fm10k_type.h
index 84781ba..810e5ee 100644
--- a/drivers/net/fm10k/base/fm10k_type.h
+++ b/drivers/net/fm10k/base/fm10k_type.h
@@ -717,6 +717,7 @@ struct fm10k_hw {
u16 subsystem_vendor_id;
u8 revision_id;
u32 flags;
+ bool switch_ready;
#define FM10K_HW_FLAG_CLOCK_OWNER BIT(0)
};
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 581c690..f11fa53 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -13,6 +13,10 @@
#include "fm10k.h"
#include "base/fm10k_api.h"
+#ifdef RTE_FM10K_MANAGEMENT
+#include "switch/fm10k_regs.h"
+#include "switch/fm10k_switch.h"
+#endif
/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
@@ -461,9 +465,6 @@ struct fm10k_xstats_name_off {
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
- dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
-
/* multipe queue mode checking */
ret = fm10k_check_mq_mode(dev);
if (ret != 0) {
@@ -512,6 +513,15 @@ struct fm10k_xstats_name_off {
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
uint32_t mrqc, *key, i, reta, j;
uint64_t hf;
+#ifdef RTE_FM10K_MANAGEMENT
+ uint16_t nb_rx_queues = dev->data->nb_rx_queues;
+ int mapped_num;
+ struct fm10k_hw *mapped_hws[FM10K_SW_PEP_MAP_NUM_MAX];
+
+ mapped_num = fm10k_switch_dpdk_mapped_hw_get(hw, mapped_hws);
+ if (mapped_num == 2)
+ nb_rx_queues /= 2;
+#endif
#define RSS_KEY_SIZE 40
static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
@@ -642,26 +652,44 @@ struct fm10k_xstats_name_off {
fm10k_dev_tx_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+#ifdef RTE_FM10K_MANAGEMENT
+ struct fm10k_hw *unmap_hw = hw;
+ uint32_t data;
+#endif
int i, ret;
+ uint16_t hw_queue_id;
struct fm10k_tx_queue *txq;
uint64_t base_addr;
uint32_t size;
+#ifndef RTE_FM10K_MANAGEMENT
/* Disable TXINT to avoid possible interrupt */
for (i = 0; i < hw->mac.max_queues; i++)
FM10K_WRITE_REG(hw, FM10K_TXINT(i),
3 << FM10K_TXINT_TIMER_SHIFT);
+#else
+ fm10k_switch_dpdk_tx_queue_num_set(unmap_hw,
+ dev->data->nb_tx_queues);
+#endif
/* Setup TX queue */
for (i = 0; i < dev->data->nb_tx_queues; ++i) {
+ hw_queue_id = i;
+#ifdef RTE_FM10K_MANAGEMENT
+ fm10k_switch_dpdk_hw_queue_map(unmap_hw,
+ i, dev->data->nb_tx_queues,
+ &hw, &hw_queue_id);
+#endif
txq = dev->data->tx_queues[i];
base_addr = txq->hw_ring_phys_addr;
size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
/* disable queue to avoid issues while updating state */
- ret = tx_queue_disable(hw, i);
+ ret = tx_queue_disable(hw, hw_queue_id);
if (ret) {
- PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
+ PMD_INIT_LOG(ERR,
+ "failed to disable queue %d",
+ hw_queue_id);
return -1;
}
/* Enable use of FTAG bit in TX descriptor, PFVTCTL
@@ -669,7 +697,7 @@ struct fm10k_xstats_name_off {
*/
if (fm10k_check_ftag(dev->device->devargs)) {
if (hw->mac.type == fm10k_mac_pf) {
- FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
+ FM10K_WRITE_REG(hw, FM10K_PFVTCTL(hw_queue_id),
FM10K_PFVTCTL_FTAG_DESC_ENABLE);
PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
} else {
@@ -679,15 +707,25 @@ struct fm10k_xstats_name_off {
}
/* set location and size for descriptor ring */
- FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
+ FM10K_WRITE_REG(hw, FM10K_TDBAL(hw_queue_id),
base_addr & UINT64_LOWER_32BITS_MASK);
- FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
+ FM10K_WRITE_REG(hw, FM10K_TDBAH(hw_queue_id),
base_addr >> (CHAR_BIT * sizeof(uint32_t)));
- FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
+ FM10K_WRITE_REG(hw, FM10K_TDLEN(hw_queue_id), size);
/* assign default SGLORT for each TX queue by PF */
+#ifndef RTE_FM10K_MANAGEMENT
if (hw->mac.type == fm10k_mac_pf)
- FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
+ FM10K_WRITE_REG(hw,
+ FM10K_TX_SGLORT(hw_queue_id),
+ hw->mac.dglort_map);
+#else
+ if (hw->mac.type == fm10k_mac_pf) {
+ data = FM10K_SW_MAKE_REG_FIELD
+ (TX_SGLORT_SGLORT, hw->mac.dglort_map);
+ FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(hw_queue_id), data);
+ }
+#endif
}
/* set up vector or scalar TX function as appropriate */
@@ -700,18 +738,24 @@ struct fm10k_xstats_name_off {
fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+#ifndef RTE_FM10K_MANAGEMENT
struct fm10k_macvlan_filter_info *macvlan;
struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
+ uint32_t logic_port = hw->mac.dglort_map;
+ uint16_t queue_stride = 0;
+#else
+ struct fm10k_hw *unmap_hw = hw;
+#endif
int i, ret;
+ uint16_t hw_queue_id;
struct fm10k_rx_queue *rxq;
uint64_t base_addr;
uint32_t size;
uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
- uint32_t logic_port = hw->mac.dglort_map;
uint16_t buf_size;
- uint16_t queue_stride = 0;
+#ifndef RTE_FM10K_MANAGEMENT
/* enable RXINT for interrupt mode */
i = 0;
if (rte_intr_dp_is_en(intr_handle)) {
@@ -731,26 +775,36 @@ struct fm10k_xstats_name_off {
for (; i < hw->mac.max_queues; i++)
FM10K_WRITE_REG(hw, FM10K_RXINT(i),
3 << FM10K_RXINT_TIMER_SHIFT);
+#else
+ fm10k_switch_dpdk_rx_queue_num_set(unmap_hw, dev->data->nb_rx_queues);
+#endif
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+ hw_queue_id = i;
+#ifdef RTE_FM10K_MANAGEMENT
+ fm10k_switch_dpdk_hw_queue_map(unmap_hw,
+ i, dev->data->nb_rx_queues, &hw, &hw_queue_id);
+#endif
rxq = dev->data->rx_queues[i];
base_addr = rxq->hw_ring_phys_addr;
size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
/* disable queue to avoid issues while updating state */
- ret = rx_queue_disable(hw, i);
+ ret = rx_queue_disable(hw, hw_queue_id);
if (ret) {
- PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
+ PMD_INIT_LOG(ERR,
+ "failed to disable queue %d",
+ hw_queue_id);
return -1;
}
/* Setup the Base and Length of the Rx Descriptor Ring */
- FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
+ FM10K_WRITE_REG(hw, FM10K_RDBAL(hw_queue_id),
base_addr & UINT64_LOWER_32BITS_MASK);
- FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
+ FM10K_WRITE_REG(hw, FM10K_RDBAH(hw_queue_id),
base_addr >> (CHAR_BIT * sizeof(uint32_t)));
- FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
+ FM10K_WRITE_REG(hw, FM10K_RDLEN(hw_queue_id), size);
/* Configure the Rx buffer size for one buff without split */
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
@@ -764,7 +818,7 @@ struct fm10k_xstats_name_off {
*/
buf_size -= FM10K_RX_DATABUF_ALIGN;
- FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
+ FM10K_WRITE_REG(hw, FM10K_SRRCTL(hw_queue_id),
(buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
FM10K_SRRCTL_LOOPBACK_SUPPRESS);
@@ -774,9 +828,9 @@ struct fm10k_xstats_name_off {
rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
uint32_t reg;
dev->data->scattered_rx = 1;
- reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
+ reg = FM10K_READ_REG(hw, FM10K_SRRCTL(hw_queue_id));
reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
- FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
+ FM10K_WRITE_REG(hw, FM10K_SRRCTL(hw_queue_id), reg);
}
/* Enable drop on empty, it's RO for VF */
@@ -796,6 +850,7 @@ struct fm10k_xstats_name_off {
/* update RX_SGLORT for loopback suppress*/
if (hw->mac.type != fm10k_mac_pf)
return 0;
+#ifndef RTE_FM10K_MANAGEMENT
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
if (macvlan->nb_queue_pools)
queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
@@ -804,6 +859,7 @@ struct fm10k_xstats_name_off {
logic_port++;
FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
}
+#endif
return 0;
}
@@ -812,12 +868,29 @@ struct fm10k_xstats_name_off {
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+#ifdef RTE_FM10K_MANAGEMENT
+ struct fm10k_hw *unmap_hw = hw;
+ int ret;
+#endif
int err;
uint32_t reg;
struct fm10k_rx_queue *rxq;
+ uint16_t hw_queue_id = rx_queue_id;
PMD_INIT_FUNC_TRACE();
+#ifdef RTE_FM10K_MANAGEMENT
+ ret = fm10k_switch_dpdk_hw_queue_map(unmap_hw,
+ rx_queue_id, dev->data->nb_rx_queues,
+ &hw, &hw_queue_id);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Can not get the mapped HW and rx queue for:"
+ " %s %d", dev->device->name, rx_queue_id);
+ return -EIO;
+ } else if (ret != 1) /* reference port's queue doesn't need start */
+ return 0;
+#endif
+
rxq = dev->data->rx_queues[rx_queue_id];
err = rx_queue_reset(rxq);
if (err == -ENOMEM) {
@@ -836,23 +909,23 @@ struct fm10k_xstats_name_off {
* this comment and the following two register writes when the
* emulation platform is no longer being used.
*/
- FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ FM10K_WRITE_REG(hw, FM10K_RDH(hw_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(hw_queue_id), rxq->nb_desc - 1);
/* Set PF ownership flag for PF devices */
- reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(hw_queue_id));
if (hw->mac.type == fm10k_mac_pf)
reg |= FM10K_RXQCTL_PF;
reg |= FM10K_RXQCTL_ENABLE;
/* enable RX queue */
- FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(hw_queue_id), reg);
FM10K_WRITE_FLUSH(hw);
/* Setup the HW Rx Head and Tail Descriptor Pointers
* Note: this must be done AFTER the queue is enabled
*/
- FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ FM10K_WRITE_REG(hw, FM10K_RDH(hw_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(hw_queue_id), rxq->nb_desc - 1);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
@@ -879,21 +952,38 @@ struct fm10k_xstats_name_off {
fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+#ifdef RTE_FM10K_MANAGEMENT
+ struct fm10k_hw *unmap_hw = hw;
+ int ret;
+#endif
/** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
+ uint16_t hw_queue_id = tx_queue_id;
PMD_INIT_FUNC_TRACE();
+#ifdef RTE_FM10K_MANAGEMENT
+ ret = fm10k_switch_dpdk_hw_queue_map(unmap_hw,
+ tx_queue_id, dev->data->nb_tx_queues, &hw, &hw_queue_id);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Can not get the mapped HW and tx queue for:"
+ " %s %d", dev->device->name, tx_queue_id);
+ return -EIO;
+ } else if (ret != 1) {
+ return 0;
+ }
+#endif
+
q->ops->reset(q);
/* reset head and tail pointers */
- FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_TDH(hw_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_TDT(hw_queue_id), 0);
/* enable TX queue */
- FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(hw_queue_id),
FM10K_TXDCTL_ENABLE | txdctl);
FM10K_WRITE_FLUSH(hw);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
@@ -1084,9 +1174,25 @@ static inline int fm10k_glort_valid(struct fm10k_hw *hw)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int i, diag;
+#ifdef RTE_FM10K_MANAGEMENT
+ struct fm10k_hw *mapped_hws[FM10K_SW_PEP_MAP_NUM_MAX];
+ int j, mapped_num;
+ uint32_t data;
+#endif
PMD_INIT_FUNC_TRACE();
+#ifdef RTE_FM10K_MANAGEMENT
+ mapped_num = fm10k_switch_dpdk_mapped_hw_get(hw, mapped_hws);
+ if (mapped_num < 0 || mapped_num > FM10K_SW_PEP_MAP_NUM_MAX) {
+ PMD_INIT_LOG(ERR, "Can not get the mapped HW for %s",
+ dev->device->name);
+ return -EIO;
+ }
+#endif
+
+
+#ifndef RTE_FM10K_MANAGEMENT
/* stop, init, then start the hw */
diag = fm10k_stop_hw(hw);
if (diag != FM10K_SUCCESS) {
@@ -1105,6 +1211,62 @@ static inline int fm10k_glort_valid(struct fm10k_hw *hw)
PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
return -EIO;
}
+#else
+ for (j = 0; j < mapped_num; j++) {
+ struct rte_pci_device *pdev =
+ RTE_ETH_DEV_TO_PCI((struct rte_eth_dev *)
+ (fm10k_switch_dpdk_port_rte_dev_get(mapped_hws[j])));
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
+
+ /* stop, init, then start the hw */
+ diag = fm10k_stop_hw(mapped_hws[j]);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
+ return -EIO;
+ }
+
+ diag = fm10k_init_hw(mapped_hws[j]);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
+ return -EIO;
+ }
+
+ diag = fm10k_start_hw(mapped_hws[j]);
+ if (diag != FM10K_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
+ return -EIO;
+ }
+
+ /* Disable TXINT to avoid possible interrupt */
+ for (i = 0; i < hw->mac.max_queues; i++)
+ FM10K_WRITE_REG(mapped_hws[j], FM10K_TXINT(i),
+ 3 << FM10K_TXINT_TIMER_SHIFT);
+
+ /* enable RXINT for interrupt mode */
+ i = 0;
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (; i < dev->data->nb_rx_queues; i++) {
+ FM10K_WRITE_REG(mapped_hws[j],
+ FM10K_RXINT(i), Q2V(pdev, i));
+ if (mapped_hws[j]->mac.type == fm10k_mac_pf)
+ FM10K_WRITE_REG(mapped_hws[j],
+ FM10K_ITR(Q2V(pdev, i)),
+ FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ else
+ FM10K_WRITE_REG(mapped_hws[j],
+ FM10K_VFITR(Q2V(pdev, i)),
+ FM10K_ITR_AUTOMASK |
+ FM10K_ITR_MASK_CLEAR);
+ }
+ }
+
+ /* Disable other RXINT to avoid possible interrupt */
+ for (; i < hw->mac.max_queues; i++)
+ FM10K_WRITE_REG(mapped_hws[j], FM10K_RXINT(i),
+ 3 << FM10K_RXINT_TIMER_SHIFT);
+ }
+#endif
diag = fm10k_dev_tx_init(dev);
if (diag) {
@@ -1156,10 +1318,26 @@ static inline int fm10k_glort_valid(struct fm10k_hw *hw)
}
}
+#ifndef RTE_FM10K_MANAGEMENT
/* Update default vlan when not in VMDQ mode */
if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
+#endif
+#ifdef RTE_FM10K_MANAGEMENT
+ /* Admit all VLANs */
+ for (j = 0; j <= 64; j++) {
+ for (i = 0; i < FM10K_SW_VLAN_TABLE_ENTRIES; i++)
+ FM10K_WRITE_REG(hw,
+ FM10K_SW_VLAN_TABLE_ENTRY(j, i),
+ 0xffffffff);
+ }
+
+ /* Disable PEP loopback */
+ data = FM10K_READ_REG(hw, FM10K_CTRL_EXT);
+ data &= ~FM10K_SW_CTRL_EXT_SWITCH_LOOPBACK;
+ FM10K_WRITE_REG(hw, FM10K_CTRL_EXT, data);
+#endif
fm10k_link_update(dev, 0);
return 0;
@@ -1327,12 +1505,24 @@ static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct fm10k_hw_stats *hw_stats =
FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
int i;
+#ifdef RTE_FM10K_MANAGEMENT
+ struct fm10k_hw *unmap_hw = hw;
+ struct fm10k_hw *mapped_hws[FM10K_SW_PEP_MAP_NUM_MAX];
+ int mapped_num;
+ uint16_t hw_queue_id;
+#endif
PMD_INIT_FUNC_TRACE();
+ ipackets = 0;
+ opackets = 0;
+ ibytes = 0;
+ obytes = 0;
+ imissed = 0;
+
+#ifndef RTE_FM10K_MANAGEMENT
fm10k_update_hw_stats(hw, hw_stats);
- ipackets = opackets = ibytes = obytes = imissed = 0;
for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
(i < hw->mac.max_queues); ++i) {
stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
@@ -1346,6 +1536,49 @@ static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
obytes += stats->q_obytes[i];
imissed += stats->q_errors[i];
}
+#else
+ /* Get statistics from mapped device's queue */
+ mapped_num = fm10k_switch_dpdk_mapped_hw_get(unmap_hw, mapped_hws);
+ if (mapped_num < 0 || mapped_num > FM10K_SW_PEP_MAP_NUM_MAX) {
+ PMD_INIT_LOG(ERR, "Can not get the mapped HW for %s",
+ dev->device->name);
+ return -EIO;
+ }
+
+ for (i = 0; i < mapped_num; i++) {
+ struct rte_eth_dev *mydev =
+ fm10k_switch_dpdk_port_rte_dev_get(mapped_hws[i]);
+ hw_stats = FM10K_DEV_PRIVATE_TO_STATS(mydev->data->dev_private);
+ fm10k_update_hw_stats(mapped_hws[i], hw_stats);
+ }
+
+ if (mapped_num) {
+ for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
+ (i < unmap_hw->mac.max_queues); ++i) {
+ fm10k_switch_dpdk_hw_queue_map(unmap_hw,
+ i, unmap_hw->mac.max_queues,
+ &hw, &hw_queue_id);
+ if (mapped_hws[1]) {
+ struct rte_eth_dev *mydev;
+ mydev = fm10k_switch_dpdk_port_rte_dev_get(hw);
+ hw_stats = FM10K_DEV_PRIVATE_TO_STATS
+ (mydev->data->dev_private);
+ }
+ stats->q_ipackets[i] =
+ hw_stats->q[hw_queue_id].rx_packets.count;
+ stats->q_opackets[i] =
+ hw_stats->q[hw_queue_id].tx_packets.count;
+ stats->q_ibytes[i] =
+ hw_stats->q[hw_queue_id].rx_bytes.count;
+ stats->q_obytes[i] =
+ hw_stats->q[hw_queue_id].tx_bytes.count;
+ ipackets += stats->q_ipackets[i];
+ opackets += stats->q_opackets[i];
+ ibytes += stats->q_ibytes[i];
+ obytes += stats->q_obytes[i];
+ }
+ }
+#endif
stats->ipackets = ipackets;
stats->opackets = opackets;
stats->ibytes = ibytes;
@@ -1808,8 +2041,7 @@ static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_HEADER_SPLIT |
- DEV_RX_OFFLOAD_RSS_HASH);
+ DEV_RX_OFFLOAD_HEADER_SPLIT);
}
static int
@@ -1818,14 +2050,28 @@ static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+#ifdef RTE_FM10K_MANAGEMENT
+ struct fm10k_hw *unmap_hw = hw;
+#endif
struct fm10k_dev_info *dev_info =
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
struct fm10k_rx_queue *q;
const struct rte_memzone *mz;
uint64_t offloads;
+ uint16_t hw_queue_id = queue_id;
PMD_INIT_FUNC_TRACE();
+#ifdef RTE_FM10K_MANAGEMENT
+ if (fm10k_switch_dpdk_hw_queue_map(unmap_hw,
+ queue_id, dev->data->nb_rx_queues,
+ &hw, &hw_queue_id) < 0) {
+ PMD_INIT_LOG(ERR, "Can not get the mapped HW and queue for:"
+ " %s %d", dev->device->name, queue_id);
+ return -EIO;
+ }
+#endif
+
offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
/* make sure the mempool element size can account for alignment. */
@@ -1871,7 +2117,7 @@ static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
q->port_id = dev->data->port_id;
q->queue_id = queue_id;
q->tail_ptr = (volatile uint32_t *)
- &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
+ &((uint32_t *)hw->hw_addr)[FM10K_RDT(hw_queue_id)];
q->offloads = offloads;
if (handle_rxconf(q, conf))
return -EINVAL;
@@ -2007,12 +2253,26 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
const struct rte_eth_txconf *conf)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+#ifdef RTE_FM10K_MANAGEMENT
+ struct fm10k_hw *unmap_hw = hw;
+#endif
struct fm10k_tx_queue *q;
const struct rte_memzone *mz;
uint64_t offloads;
+ uint16_t hw_queue_id = queue_id;
PMD_INIT_FUNC_TRACE();
+#ifdef RTE_FM10K_MANAGEMENT
+ if (fm10k_switch_dpdk_hw_queue_map(unmap_hw,
+ queue_id, dev->data->nb_tx_queues,
+ &hw, &hw_queue_id) < 0) {
+ PMD_INIT_LOG(ERR, "Can not get the mapped HW and queue for:"
+ " %s %d", dev->device->name, queue_id);
+ return -EIO;
+ }
+#endif
+
offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
/* make sure a valid number of descriptors have been requested */
@@ -2054,7 +2314,7 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
q->offloads = offloads;
q->ops = &def_txq_ops;
q->tail_ptr = (volatile uint32_t *)
- &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
+ &((uint32_t *)hw->hw_addr)[FM10K_TDT(hw_queue_id)];
if (handle_txconf(q, conf))
return -EINVAL;
@@ -2284,6 +2544,77 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
return 0;
}
+#ifdef RTE_FM10K_MANAGEMENT
+static int
+fm10k_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t sw_id, uint8_t on)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_LOG(DEBUG,
+ "Mirror set, switch %d to port %d attach vlan %d on %d",
+ sw_id, mirror_conf->dst_pool,
+ mirror_conf->vlan.vlan_id[0], on);
+
+ if (on) {
+ if (fm10k_switch_mirror_set(hw,
+ mirror_conf->dst_pool,
+ mirror_conf->vlan.vlan_id[0]) < 0) {
+ PMD_INIT_LOG(ERR, "Input wrong port!!!");
+ return -1;
+ }
+ } else {
+ if (fm10k_switch_mirror_reset(hw) < 0) {
+ PMD_INIT_LOG(ERR, "Input wrong port!!!");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+
+static int
+fm10k_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
+{
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ PMD_INIT_LOG(DEBUG, "Mirror reset, switch %d", sw_id);
+
+ fm10k_switch_mirror_reset(hw);
+
+ return 0;
+}
+
+static int
+fm10k_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = fm10k_flow_ops_get();
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+#endif
+
static void
fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
{
@@ -2592,6 +2923,9 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
int status_mbx;
s32 err;
+#ifdef RTE_FM10K_MANAGEMENT
+ uint32_t writeback = 0;
+#endif
if (hw->mac.type != fm10k_mac_pf)
return;
@@ -2605,11 +2939,20 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
}
/* Handle switch up/down */
- if (cause & FM10K_EICR_SWITCHNOTREADY)
- PMD_INIT_LOG(ERR, "INT: Switch is not ready");
+ if (cause & FM10K_EICR_SWITCHNOTREADY) {
+ PMD_INIT_LOG(INFO, "INT: Switch is not ready");
+#ifdef RTE_FM10K_MANAGEMENT
+ hw->switch_ready = false;
+ writeback |= FM10K_EICR_SWITCHNOTREADY;
+#endif
+ }
if (cause & FM10K_EICR_SWITCHREADY) {
PMD_INIT_LOG(INFO, "INT: Switch is ready");
+#ifdef RTE_FM10K_MANAGEMENT
+ hw->switch_ready = true;
+ writeback |= FM10K_EICR_SWITCHREADY;
+#endif
if (dev_info->sm_down == 1) {
fm10k_mbx_lock(hw);
@@ -2660,6 +3003,7 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
}
/* Handle mailbox message */
+#ifndef RTE_FM10K_MANAGEMENT
fm10k_mbx_lock(hw);
err = hw->mbx.ops.process(hw, &hw->mbx);
fm10k_mbx_unlock(hw);
@@ -2667,10 +3011,33 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
if (err == FM10K_ERR_RESET_REQUESTED) {
PMD_INIT_LOG(INFO, "INT: Switch is down");
dev_info->sm_down = 1;
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
+ _rte_eth_dev_callback_process
+ (dev,
+ RTE_ETH_EVENT_INTR_LSC,
NULL);
}
+#else
+ if (cause & FM10K_EICR_MAILBOX) {
+ fm10k_mbx_lock(hw);
+ err = hw->mbx.ops.process(hw, &hw->mbx);
+ fm10k_mbx_unlock(hw);
+ writeback |= FM10K_EICR_MAILBOX;
+ if (err == FM10K_ERR_RESET_REQUESTED) {
+ PMD_INIT_LOG(INFO, "INT: Switch is down");
+ dev_info->sm_down = 1;
+ _rte_eth_dev_callback_process
+ (dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
+ }
+
+ /* Handle switch interrupt */
+ if (cause & FM10K_SW_EICR_SWITCH_INT)
+ fm10k_switch_intr(hw);
+#endif
+
/* Handle SRAM error */
if (cause & FM10K_EICR_SRAMERROR) {
PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");
@@ -2682,15 +3049,27 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
/* Todo: print out error message after shared code updates */
}
+#ifndef RTE_FM10K_MANAGEMENT
/* Clear these 3 events if having any */
cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
FM10K_EICR_SWITCHREADY;
if (cause)
FM10K_WRITE_REG(hw, FM10K_EICR, cause);
+#else
+ if (writeback)
+ FM10K_WRITE_REG(hw, FM10K_EICR, writeback);
+#endif
/* Re-enable interrupt from device side */
- FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
+#ifndef RTE_FM10K_MANAGEMENT
+ FM10K_WRITE_REG(hw, FM10K_ITR(0),
+ FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
+#else
+ FM10K_WRITE_REG(hw, FM10K_ITR(0),
+ FM10K_SW_ITR_AUTO_MASK |
+ FM10K_SW_MAKE_REG_FIELD(ITR_MASK, FM10K_SW_ITR_MASK_W_ENABLE));
+#endif
/* Re-enable interrupt from host side */
rte_intr_ack(dev->intr_handle);
}
@@ -2897,6 +3276,11 @@ static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
.reta_query = fm10k_reta_query,
.rss_hash_update = fm10k_rss_hash_update,
.rss_hash_conf_get = fm10k_rss_hash_conf_get,
+#ifdef RTE_FM10K_MANAGEMENT
+ .mirror_rule_set = fm10k_mirror_rule_set,
+ .mirror_rule_reset = fm10k_mirror_rule_reset,
+ .filter_ctrl = fm10k_dev_filter_ctrl,
+#endif
};
static int ftag_check_handler(__rte_unused const char *key,
@@ -3075,13 +3459,67 @@ static void __attribute__((cold))
info->sm_down = false;
}
+#ifdef RTE_FM10K_MANAGEMENT
+static int eth_fm10k_dev_init_hook(struct fm10k_hw *hw)
+{
+ int i;
+ struct rte_eth_dev *dev =
+ (struct rte_eth_dev *)fm10k_switch_dpdk_port_rte_dev_get(hw);
+
+ /*
+ * Below function will trigger operations on mailbox, acquire lock to
+ * avoid race condition from interrupt handler. Operations on mailbox
+ * FIFO will trigger interrupt to PF/SM, in which interrupt handler
+ * will handle and generate an interrupt to our side. Then, FIFO in
+ * mailbox will be touched.
+ */
+ if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE) {
+ PMD_INIT_LOG(ERR, "dglort_map is not ready");
+ return -1;
+ }
+
+ fm10k_mbx_lock(hw);
+ /* Enable port first */
+ hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
+ MAX_LPORT_NUM, 1);
+ /* Set unicast mode by default. App can change to other mode in other
+ * API func.
+ */
+ hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
+ FM10K_XCAST_MODE_NONE);
+ fm10k_mbx_unlock(hw);
+
+ /* Make sure default VID is ready before going forward. */
+ if (hw->mac.type == fm10k_mac_pf) {
+ for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
+ if (hw->mac.default_vid)
+ break;
+ /* Delay some time to acquire async port VLAN info. */
+ rte_delay_us(WAIT_SWITCH_MSG_US);
+ }
+
+ if (hw->mac.default_vid == 0)
+ hw->mac.default_vid = 1;
+ }
+
+ /* Add default mac address */
+ fm10k_MAC_filter_set(dev, hw->mac.addr, true,
+ MAIN_VSI_POOL_NUMBER);
+
+ return 0;
+}
+#endif
+
static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pdev->intr_handle;
- int diag, i;
+ int diag;
+#ifndef RTE_FM10K_MANAGEMENT
+ int i;
+#endif
struct fm10k_macvlan_filter_info *macvlan;
PMD_INIT_FUNC_TRACE();
@@ -3118,7 +3556,9 @@ static void __attribute__((cold))
" Try to blacklist unused devices.");
return -EIO;
}
-
+#ifdef RTE_FM10K_MANAGEMENT
+ hw->sw_addr = (void *)pdev->mem_resource[4].addr;
+#endif
/* Store fm10k_adapter pointer */
hw->back = dev->data->dev_private;
@@ -3129,6 +3569,25 @@ static void __attribute__((cold))
return -EIO;
}
+#ifdef RTE_FM10K_MANAGEMENT
+ if (hw->mac.type == fm10k_mac_pf) {
+ if (hw->hw_addr == NULL || hw->sw_addr == NULL) {
+ PMD_INIT_LOG(ERR, "Bad mem resource."
+ " Try to blacklist unused devices.");
+ return -EIO;
+ }
+ } else {
+ if (hw->hw_addr == NULL) {
+ PMD_INIT_LOG(ERR, "Bad mem resource."
+ " Try to blacklist unused devices.");
+ return -EIO;
+ }
+ }
+
+ /* Store fm10k_adapter pointer */
+ hw->back = dev->data->dev_private;
+#endif
+
/* Initialize parameters */
fm10k_params_init(dev);
@@ -3209,6 +3668,7 @@ static void __attribute__((cold))
hw->mac.ops.update_int_moderator(hw);
/* Make sure Switch Manager is ready before going forward. */
+#ifndef RTE_FM10K_MANAGEMENT
if (hw->mac.type == fm10k_mac_pf) {
bool switch_ready = false;
@@ -3216,13 +3676,13 @@ static void __attribute__((cold))
fm10k_mbx_lock(hw);
hw->mac.ops.get_host_state(hw, &switch_ready);
fm10k_mbx_unlock(hw);
- if (switch_ready == true)
+ if (switch_ready)
break;
/* Delay some time to acquire async LPORT_MAP info. */
rte_delay_us(WAIT_SWITCH_MSG_US);
}
- if (switch_ready == false) {
+ if (switch_ready == 0) {
PMD_INIT_LOG(ERR, "switch is not ready");
return -1;
}
@@ -3268,11 +3728,25 @@ static void __attribute__((cold))
MAIN_VSI_POOL_NUMBER);
return 0;
+#else
+ if (hw->mac.type == fm10k_mac_pf) {
+ bool master = FM10K_READ_REG(hw,
+ FM10K_CTRL) & FM10K_CTRL_BAR4_ALLOWED;
+ return fm10k_switch_dpdk_port_start(hw,
+ dev, 1, master, eth_fm10k_dev_init_hook);
+ } else {
+ return fm10k_switch_dpdk_port_start(hw,
+ dev, 0, false, eth_fm10k_dev_init_hook);
+ }
+#endif
}
static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
+#ifdef RTE_FM10K_MANAGEMENT
+ struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+#endif
PMD_INIT_FUNC_TRACE();
/* only uninitialize in the primary process */
@@ -3282,6 +3756,10 @@ static void __attribute__((cold))
/* safe to close dev here */
fm10k_dev_close(dev);
+#ifdef RTE_FM10K_MANAGEMENT
+ fm10k_switch_dpdk_port_stop(hw);
+#endif
+
return 0;
}
diff --git a/drivers/net/fm10k/switch/fm10k_switch.c b/drivers/net/fm10k/switch/fm10k_switch.c
index 009d62a..abe719d 100644
--- a/drivers/net/fm10k/switch/fm10k_switch.c
+++ b/drivers/net/fm10k/switch/fm10k_switch.c
@@ -31,6 +31,10 @@
#define FM10K_LED_POLL_INTERVAL_MS 500
#define FM10K_LED_BLINKS_PER_SECOND 2
+/* Max number of tries to acquire switch status */
+#define FM10K_QUERY_SWITCH_STATE_TIMES 10
+/* Wait interval to get switch status */
+#define FM10K_WAIT_SWITCH_MSG_US 100000
/*
* use epl as external port map, only support QUAD_ON
@@ -1914,6 +1918,19 @@ struct fm10k_switch*
}
}
+ /* Make sure Switch Manager is ready before going forward. */
+ for (i = 0; i < FM10K_QUERY_SWITCH_STATE_TIMES; i++) {
+ if (master_hw->switch_ready)
+ break;
+ /* Delay some time to acquire async LPORT_MAP info. */
+ rte_delay_us(FM10K_WAIT_SWITCH_MSG_US);
+ }
+
+ if (!master_hw->switch_ready) {
+ PMD_INIT_LOG(ERR, "switch is not ready");
+ return -1;
+ }
+
/* do initialize all ports, after switch is ready */
for (i = 0; i < FM10K_SW_LOGICAL_PORTS_MAX; i++) {
uint32_t sglort;
@@ -1941,8 +1958,9 @@ struct fm10k_switch*
return -1;
}
hw->mac.dglort_map = sglort | 0xffff0000;
+ hw->switch_ready = master_hw->switch_ready;
+ func(hw);
}
- func(hw);
}
return 0;
}
diff --git a/drivers/net/fm10k/switch/fm10k_switch.h b/drivers/net/fm10k/switch/fm10k_switch.h
index b4b363c..a2aa0d0 100644
--- a/drivers/net/fm10k/switch/fm10k_switch.h
+++ b/drivers/net/fm10k/switch/fm10k_switch.h
@@ -34,6 +34,7 @@
#define FM10K_SW_PEPS_MAX 9
#define FM10K_SW_PEPS_SUPPORTED 4
#define FM10K_SW_PEP_PORTS_MAX 4
+#define FM10K_SW_PEP_MAP_NUM_MAX 2
/*
* GLORT
--
1.8.3.1