* [dpdk-dev] [PATCH 1/6] xen: allow determining DOM0 at runtime
2015-10-23 6:34 [dpdk-dev] [PATCH v2 0/6] Xen DOM0 runtime support Stephen Hemminger
@ 2015-10-23 6:34 ` Stephen Hemminger
2015-10-23 6:34 ` [dpdk-dev] [PATCH 2/6] ethdev: add common function for reserving DMA regions Stephen Hemminger
` (6 subsequent siblings)
7 siblings, 0 replies; 10+ messages in thread
From: Stephen Hemminger @ 2015-10-23 6:34 UTC (permalink / raw)
To: dev
Add memory infrastructure for runtime Xen DOM0 support.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
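[Editor's note] A minimal usage sketch, not part of the patch: after this change a caller
can translate a memzone's physical address unconditionally, because rte_mem_phy2mch()
dispatches at runtime (it calls rte_xen_mem_phy2mch() when DOM0 support is enabled in the
EAL configuration and otherwise returns the physical address unchanged). The helper name
below is illustrative.

/* Illustrative sketch: callers no longer need a RTE_LIBRTE_XEN_DOM0
 * #ifdef around the address translation, since rte_mem_phy2mch() is
 * effectively a no-op when DOM0 support is off at runtime. */
#include <rte_memory.h>
#include <rte_memzone.h>

static phys_addr_t
ring_dma_addr(const struct rte_memzone *mz)
{
	return rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
}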
lib/librte_eal/common/include/rte_memory.h | 30 ++++++++++++++++-
lib/librte_eal/linuxapp/eal/eal_memory.c | 7 ++++
lib/librte_eal/linuxapp/eal/eal_xen_memory.c | 2 +-
lib/librte_mempool/rte_mempool.c | 48 ++++++++++++++++++++--------
lib/librte_mempool/rte_mempool.h | 3 +-
5 files changed, 72 insertions(+), 18 deletions(-)
diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index 1bed415..067be10 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -52,6 +52,8 @@
extern "C" {
#endif
+#include <rte_common.h>
+
enum rte_page_sizes {
RTE_PGSIZE_4K = 1ULL << 12,
RTE_PGSIZE_64K = 1ULL << 16,
@@ -180,6 +182,13 @@ unsigned rte_memory_get_nchannel(void);
unsigned rte_memory_get_nrank(void);
#ifdef RTE_LIBRTE_XEN_DOM0
+
+/**< Internal use only - should DOM0 memory mapping be used */
+extern int is_xen_dom0_supported(void);
+
+/**< Internal use only - phys to virt mapping for xen */
+phys_addr_t rte_xen_mem_phy2mch(uint32_t, const phys_addr_t);
+
/**
* Return the physical address of elt, which is an element of the pool mp.
*
@@ -191,7 +200,14 @@ unsigned rte_memory_get_nrank(void);
* @return
* The physical address or error.
*/
-phys_addr_t rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr);
+static inline phys_addr_t
+rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
+{
+ if (is_xen_dom0_supported())
+ return rte_xen_mem_phy2mch(memseg_id, phy_addr);
+ else
+ return phy_addr;
+}
/**
* Memory init for supporting application running on Xen domain0.
@@ -214,7 +230,19 @@ int rte_xen_dom0_memory_init(void);
* negative: error
*/
int rte_xen_dom0_memory_attach(void);
+#else
+static inline int is_xen_dom0_supported(void)
+{
+ return 0;
+}
+
+static inline phys_addr_t
+rte_mem_phy2mch(uint32_t memseg_id __rte_unused, const phys_addr_t phy_addr)
+{
+ return phy_addr;
+}
#endif
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index ac2745e..f36cabd 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -97,6 +97,13 @@
#include "eal_filesystem.h"
#include "eal_hugepages.h"
+#ifdef RTE_LIBRTE_XEN_DOM0
+int is_xen_dom0_supported(void)
+{
+ return internal_config.xen_dom0_support;
+}
+#endif
+
/**
* @file
* Huge page mapping under linux
diff --git a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
index d228a9d..7fd9e83 100644
--- a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c
@@ -156,7 +156,7 @@ get_xen_memory_size(void)
* Based on physical address to caculate MFN in Xen Dom0.
*/
phys_addr_t
-rte_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
+rte_xen_mem_phy2mch(uint32_t memseg_id, const phys_addr_t phy_addr)
{
int mfn_id;
uint64_t mfn, mfn_offset;
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 8e185c5..d063268 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -375,6 +375,26 @@ rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
return usz;
}
+#ifndef RTE_LIBRTE_XEN_DOM0
+/* stub if DOM0 support not configured */
+struct rte_mempool *
+rte_dom0_mempool_create(const char *name __rte_unused,
+ unsigned n __rte_unused,
+ unsigned elt_size __rte_unused,
+ unsigned cache_size __rte_unused,
+ unsigned private_data_size __rte_unused,
+ rte_mempool_ctor_t *mp_init __rte_unused,
+ void *mp_init_arg __rte_unused,
+ rte_mempool_obj_ctor_t *obj_init __rte_unused,
+ void *obj_init_arg __rte_unused,
+ int socket_id __rte_unused,
+ unsigned flags __rte_unused)
+{
+ rte_errno = EINVAL;
+ return NULL;
+}
+#endif
+
/* create the mempool */
struct rte_mempool *
rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
@@ -383,20 +403,20 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags)
{
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_dom0_mempool_create(name, n, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags);
-#else
- return rte_mempool_xmem_create(name, n, elt_size,
- cache_size, private_data_size,
- mp_init, mp_init_arg,
- obj_init, obj_init_arg,
- socket_id, flags,
- NULL, NULL, MEMPOOL_PG_NUM_DEFAULT, MEMPOOL_PG_SHIFT_MAX);
-#endif
+ if (is_xen_dom0_supported())
+ return rte_dom0_mempool_create(name, n, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags);
+ else
+ return rte_mempool_xmem_create(name, n, elt_size,
+ cache_size, private_data_size,
+ mp_init, mp_init_arg,
+ obj_init, obj_init_arg,
+ socket_id, flags,
+ NULL, NULL, MEMPOOL_PG_NUM_DEFAULT,
+ MEMPOOL_PG_SHIFT_MAX);
}
/*
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 8abeca9..6e2390a 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -640,7 +640,6 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
int socket_id, unsigned flags, void *vaddr,
const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift);
-#ifdef RTE_LIBRTE_XEN_DOM0
/**
* Create a new mempool named *name* in memory on Xen Dom0.
*
@@ -728,7 +727,7 @@ rte_dom0_mempool_create(const char *name, unsigned n, unsigned elt_size,
rte_mempool_ctor_t *mp_init, void *mp_init_arg,
rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags);
-#endif
+
/**
* Dump the status of the mempool to the console.
--
2.1.4
* [dpdk-dev] [PATCH 2/6] ethdev: add common function for reserving DMA regions
2015-10-23 6:34 [dpdk-dev] [PATCH v2 0/6] Xen DOM0 runtime support Stephen Hemminger
2015-10-23 6:34 ` [dpdk-dev] [PATCH 1/6] xen: allow determining DOM0 at runtime Stephen Hemminger
@ 2015-10-23 6:34 ` Stephen Hemminger
2015-10-23 6:34 ` [dpdk-dev] [PATCH 3/6] e1000: use rte_eth_dma_zone_reserve Stephen Hemminger
` (5 subsequent siblings)
7 siblings, 0 replies; 10+ messages in thread
From: Stephen Hemminger @ 2015-10-23 6:34 UTC (permalink / raw)
To: dev
The code to create aligned DMA regions was copy-and-pasted throughout
all the drivers. Since this code now has to change, create a common
function that does the right thing for Xen at runtime.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
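[Editor's note] A minimal sketch of the intended call pattern in a PMD queue-setup path
(illustrative only; dev, queue_idx, ring_size, socket_id and txq stand in for the usual
queue-setup locals, and the real driver conversions follow in patches 3-6):

/* Illustrative sketch: the common helper looks the memzone up by name
 * and otherwise reserves it, bounded to 2M pages when running as Xen
 * DOM0, so the per-driver ring_dma_zone_reserve() copies can go away. */
const struct rte_memzone *tz;

tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
			      ring_size, RTE_CACHE_LINE_SIZE, socket_id);
if (tz == NULL)
	return -ENOMEM;

txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
txq->tx_ring = tz->addr;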
lib/librte_ether/rte_ethdev.c | 24 ++++++++++++++++++++++++
lib/librte_ether/rte_ethdev.h | 23 +++++++++++++++++++++++
2 files changed, 47 insertions(+)
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index f593f6e..6c520a3 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -2838,6 +2838,30 @@ rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
return 0;
}
+
+const struct rte_memzone *
+rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
+ uint16_t queue_id, size_t size, unsigned align,
+ int socket_id)
+{
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ dev->driver->pci_drv.name, ring_name,
+ dev->data->port_id, queue_id);
+
+ mz = rte_memzone_lookup(z_name);
+ if (mz)
+ return mz;
+
+ if (is_xen_dom0_supported())
+ return rte_memzone_reserve_bounded(z_name, size, socket_id,
+ 0, align, RTE_PGSIZE_2M);
+ else
+ return rte_memzone_reserve_aligned(z_name, size, socket_id,
+ 0, align);
+}
int
rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 8a8c82b..8d2098d 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -3598,6 +3598,29 @@ extern int rte_eth_timesync_read_rx_timestamp(uint8_t port_id,
extern int rte_eth_timesync_read_tx_timestamp(uint8_t port_id,
struct timespec *timestamp);
+/**
+ * Create memzone for HW rings.
+ * malloc can't be used as the physical address is needed.
+ * If the memzone is already created, then this function returns a ptr
+ * to the old one.
+ *
+ * @param eth_dev
+ * The *eth_dev* pointer is the address of the *rte_eth_dev* structure
+ * @param name
+ * The name of the memory zone
+ * @param queue_id
+ * The index of the queue to add to name
+ * @param size
+ * The size of the memory area
+ * @param align
+ * Alignment for resulting memzone. Must be a power of 2.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in case of NUMA.
+ */
+const struct rte_memzone *
+rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
+ uint16_t queue_id, size_t size,
+ unsigned align, int socket_id);
#ifdef __cplusplus
}
#endif
--
2.1.4
* [dpdk-dev] [PATCH 3/6] e1000: use rte_eth_dma_zone_reserve
2015-10-23 6:34 [dpdk-dev] [PATCH v2 0/6] Xen DOM0 runtime support Stephen Hemminger
2015-10-23 6:34 ` [dpdk-dev] [PATCH 1/6] xen: allow determining DOM0 at runtime Stephen Hemminger
2015-10-23 6:34 ` [dpdk-dev] [PATCH 2/6] ethdev: add common function for reserving DMA regions Stephen Hemminger
@ 2015-10-23 6:34 ` Stephen Hemminger
2015-10-23 6:34 ` [dpdk-dev] [PATCH 4/6] ixgbe: " Stephen Hemminger
` (4 subsequent siblings)
7 siblings, 0 replies; 10+ messages in thread
From: Stephen Hemminger @ 2015-10-23 6:34 UTC (permalink / raw)
To: dev
Use common code to handle Xen support at runtime.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/e1000/em_rxtx.c | 38 ++++----------------------------------
drivers/net/e1000/igb_rxtx.c | 41 ++++++-----------------------------------
2 files changed, 10 insertions(+), 69 deletions(-)
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 3b8776d..32e0953 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1104,28 +1104,6 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
#define EM_MAX_BUF_SIZE 16384
#define EM_RCTL_FLXBUF_STEP 1024
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- const struct rte_memzone *mz;
- char z_name[RTE_MEMZONE_NAMESIZE];
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name, dev->data->port_id,
- queue_id);
-
- if ((mz = rte_memzone_lookup(z_name)) != 0)
- return (mz);
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(z_name, ring_size,
- socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve(z_name, ring_size, socket_id, 0);
-#endif
-}
-
static void
em_tx_queue_release_mbufs(struct em_tx_queue *txq)
{
@@ -1273,8 +1251,8 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
* resizing in later calls to the queue setup function.
*/
tsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;
- if ((tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
- socket_id)) == NULL)
+ if ((tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
+ RTE_CACHE_LINE_SIZE, socket_id)) == NULL)
return (-ENOMEM);
/* Allocate the tx queue data structure. */
@@ -1300,11 +1278,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-#else
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#endif
txq->tx_ring = (struct e1000_data_desc *) tz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
@@ -1400,8 +1374,8 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
/* Allocate RX ring for max possible mumber of hardware descriptors. */
rsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC;
- if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
- socket_id)) == NULL)
+ if ((rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
+ RTE_CACHE_LINE_SIZE, socket_id)) == NULL)
return (-ENOMEM);
/* Allocate the RX queue data structure. */
@@ -1430,11 +1404,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
-#else
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#endif
rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 19905fd..7a4bd5b 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1167,29 +1167,6 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
#define IGB_MIN_RING_DESC 32
#define IGB_MAX_RING_DESC 4096
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name,
- dev->data->port_id, queue_id);
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(z_name, ring_size,
- socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, IGB_ALIGN);
-#endif
-}
-
static void
igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
{
@@ -1322,8 +1299,8 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
* resizing in later calls to the queue setup function.
*/
size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
- tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
- size, socket_id);
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
+ IGB_ALIGN, socket_id);
if (tz == NULL) {
igb_tx_queue_release(txq);
return (-ENOMEM);
@@ -1341,12 +1318,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-#else
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#endif
- txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
+
+ txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
/* Allocate software ring */
txq->sw_ring = rte_zmalloc("txq->sw_ring",
sizeof(struct igb_tx_entry) * nb_desc,
@@ -1470,18 +1444,15 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
* resizing in later calls to the queue setup function.
*/
size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
- rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
+ IGB_ALIGN, socket_id);
if (rz == NULL) {
igb_rx_queue_release(rxq);
return (-ENOMEM);
}
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
-#else
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#endif
rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
/* Allocate software ring. */
--
2.1.4
* [dpdk-dev] [PATCH 4/6] ixgbe: use rte_eth_dma_zone_reserve
2015-10-23 6:34 [dpdk-dev] [PATCH v2 0/6] Xen DOM0 runtime support Stephen Hemminger
` (2 preceding siblings ...)
2015-10-23 6:34 ` [dpdk-dev] [PATCH 3/6] e1000: use rte_eth_dma_zone_reserve Stephen Hemminger
@ 2015-10-23 6:34 ` Stephen Hemminger
2015-10-23 6:34 ` [dpdk-dev] [PATCH 5/6] i40e: " Stephen Hemminger
` (3 subsequent siblings)
7 siblings, 0 replies; 10+ messages in thread
From: Stephen Hemminger @ 2015-10-23 6:34 UTC (permalink / raw)
To: dev
Adapt DMA memory for Xen at runtime.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/ixgbe/ixgbe_rxtx.c | 47 ++++++------------------------------------
1 file changed, 6 insertions(+), 41 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a598a72..b6cbb64 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1839,35 +1839,6 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
#define IXGBE_MIN_RING_DESC 32
#define IXGBE_MAX_RING_DESC 4096
-/*
- * Create memzone for HW rings. malloc can't be used as the physical address is
- * needed. If the memzone is already created, then this function returns a ptr
- * to the old one.
- */
-static const struct rte_memzone * __attribute__((cold))
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name,
- dev->data->port_id, queue_id);
-
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(z_name, ring_size,
- socket_id, 0, IXGBE_ALIGN, RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, IXGBE_ALIGN);
-#endif
-}
-
static void __attribute__((cold))
ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
{
@@ -2102,9 +2073,9 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
- socket_id);
+ IXGBE_ALIGN, socket_id);
if (tz == NULL) {
ixgbe_tx_queue_release(txq);
return (-ENOMEM);
@@ -2134,11 +2105,8 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
else
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
-#ifndef RTE_LIBRTE_XEN_DOM0
- txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-#else
+
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#endif
txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
/* Allocate software ring */
@@ -2408,8 +2376,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
- RX_RING_SZ, socket_id);
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ RX_RING_SZ, IXGBE_ALIGN, socket_id);
if (rz == NULL) {
ixgbe_rx_queue_release(rxq);
return (-ENOMEM);
@@ -2438,11 +2406,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rdh_reg_addr =
IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
}
-#ifndef RTE_LIBRTE_XEN_DOM0
- rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
-#else
+
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#endif
rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
/*
--
2.1.4
* [dpdk-dev] [PATCH 5/6] i40e: use rte_eth_dma_zone_reserve
2015-10-23 6:34 [dpdk-dev] [PATCH v2 0/6] Xen DOM0 runtime support Stephen Hemminger
` (3 preceding siblings ...)
2015-10-23 6:34 ` [dpdk-dev] [PATCH 4/6] ixgbe: " Stephen Hemminger
@ 2015-10-23 6:34 ` Stephen Hemminger
2015-10-23 6:34 ` [dpdk-dev] [PATCH 6/6] fm10k: " Stephen Hemminger
` (2 subsequent siblings)
7 siblings, 0 replies; 10+ messages in thread
From: Stephen Hemminger @ 2015-10-23 6:34 UTC (permalink / raw)
To: dev
Handle Xen support at runtime.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/i40e/i40e_ethdev.c | 9 ----
drivers/net/i40e/i40e_fdir.c | 5 +--
drivers/net/i40e/i40e_rxtx.c | 94 ++++++++----------------------------------
3 files changed, 19 insertions(+), 89 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 2dd9fdc..4bf2a5f 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -2098,24 +2098,15 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
id++;
snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
-#ifdef RTE_LIBRTE_XEN_DOM0
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
alignment, RTE_PGSIZE_2M);
-#else
- mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY, 0,
- alignment);
-#endif
if (!mz)
return I40E_ERR_NO_MEMORY;
mem->id = id;
mem->size = size;
mem->va = mz->addr;
-#ifdef RTE_LIBRTE_XEN_DOM0
mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
-#else
- mem->pa = mz->phys_addr;
-#endif
return I40E_SUCCESS;
}
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index c9ce98f..ad64ff7 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -275,11 +275,8 @@ i40e_fdir_setup(struct i40e_pf *pf)
goto fail_mem;
}
pf->fdir.prg_pkt = mz->addr;
-#ifdef RTE_LIBRTE_XEN_DOM0
pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
-#else
- pf->fdir.dma_addr = (uint64_t)mz->phys_addr;
-#endif
+
pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
vsi->base_queue);
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index fd656d5..77b90fa 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -84,12 +84,6 @@
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
- const char *ring_name,
- uint16_t queue_id,
- uint32_t ring_size,
- int socket_id);
static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -2170,11 +2164,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
/* Allocate the maximun number of RX ring hardware descriptor. */
ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- rz = i40e_ring_dma_zone_reserve(dev,
- "rx_ring",
- queue_idx,
- ring_size,
- socket_id);
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ ring_size, I40E_ALIGN, socket_id);
if (!rz) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
@@ -2184,12 +2175,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
/* Zero all the descriptors in the ring. */
memset(rz->addr, 0, ring_size);
-#ifdef RTE_LIBRTE_XEN_DOM0
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
- rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
-
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
@@ -2438,11 +2424,8 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
/* Allocate TX hardware ring descriptors. */
ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- tz = i40e_ring_dma_zone_reserve(dev,
- "tx_ring",
- queue_idx,
- ring_size,
- socket_id);
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ ring_size, I40E_ALIGN, socket_id);
if (!tz) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
@@ -2467,11 +2450,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
-#ifdef RTE_LIBRTE_XEN_DOM0
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
- txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/* Allocate software ring */
@@ -2518,47 +2497,22 @@ i40e_dev_tx_queue_release(void *txq)
rte_free(q);
}
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
- const char *ring_name,
- uint16_t queue_id,
- uint32_t ring_size,
- int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name,
- dev->data->port_id, queue_id);
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(z_name, ring_size,
- socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, I40E_ALIGN);
-#endif
-}
-
const struct rte_memzone *
i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
- const struct rte_memzone *mz = NULL;
+ const struct rte_memzone *mz;
mz = rte_memzone_lookup(name);
if (mz)
return mz;
-#ifdef RTE_LIBRTE_XEN_DOM0
- mz = rte_memzone_reserve_bounded(name, len,
- socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
- mz = rte_memzone_reserve_aligned(name, len,
+
+ if (is_xen_dom0_supported())
+ mz = rte_memzone_reserve_bounded(name, len,
+ socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
+ else
+ mz = rte_memzone_reserve_aligned(name, len,
socket_id, 0, I40E_ALIGN);
-#endif
+
return mz;
}
@@ -2971,11 +2925,9 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- tz = i40e_ring_dma_zone_reserve(dev,
- "fdir_tx_ring",
- I40E_FDIR_QUEUE_ID,
- ring_size,
- SOCKET_ID_ANY);
+ tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
+ I40E_FDIR_QUEUE_ID, ring_size,
+ I40E_ALIGN, SOCKET_ID_ANY);
if (!tz) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
@@ -2987,11 +2939,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
txq->vsi = pf->fdir.fdir_vsi;
-#ifdef RTE_LIBRTE_XEN_DOM0
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
- txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/*
* don't need to allocate software ring and reset for the fdir
@@ -3031,11 +2979,9 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- rz = i40e_ring_dma_zone_reserve(dev,
- "fdir_rx_ring",
- I40E_FDIR_QUEUE_ID,
- ring_size,
- SOCKET_ID_ANY);
+ rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
+ I40E_FDIR_QUEUE_ID, ring_size,
+ I40E_ALIGN, SOCKET_ID_ANY);
if (!rz) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
@@ -3047,11 +2993,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
rxq->vsi = pf->fdir.fdir_vsi;
-#ifdef RTE_LIBRTE_XEN_DOM0
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
- rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
/*
--
2.1.4
* [dpdk-dev] [PATCH 6/6] fm10k: use rte_eth_dma_zone_reserve
2015-10-23 6:34 [dpdk-dev] [PATCH v2 0/6] Xen DOM0 runtime support Stephen Hemminger
` (4 preceding siblings ...)
2015-10-23 6:34 ` [dpdk-dev] [PATCH 5/6] i40e: " Stephen Hemminger
@ 2015-10-23 6:34 ` Stephen Hemminger
2015-10-26 7:24 ` [dpdk-dev] [PATCH v2 0/6] Xen DOM0 runtime support Liu, Jijiang
2015-11-03 0:40 ` Thomas Monjalon
7 siblings, 0 replies; 10+ messages in thread
From: Stephen Hemminger @ 2015-10-23 6:34 UTC (permalink / raw)
To: dev
Adapt to Xen at runtime.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
v2 - fix typo where virt to phys is done
drivers/net/fm10k/fm10k_ethdev.c | 47 +++++-----------------------------------
1 file changed, 6 insertions(+), 41 deletions(-)
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index a69c990..571f5f0 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -1144,34 +1144,6 @@ check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
return 0;
}
-/*
- * Create a memzone for hardware descriptor rings. Malloc cannot be used since
- * the physical address is required. If the memzone is already created, then
- * this function returns a pointer to the existing memzone.
- */
-static inline const struct rte_memzone *
-allocate_hw_ring(const char *driver_name, const char *ring_name,
- uint8_t port_id, uint16_t queue_id, int socket_id,
- uint32_t size, uint32_t align)
-{
- char name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
- driver_name, ring_name, port_id, queue_id, socket_id);
-
- /* return the memzone if it already exists */
- mz = rte_memzone_lookup(name);
- if (mz)
- return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
- RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
-#endif
-}
static inline int
check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
@@ -1317,9 +1289,9 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
* enough to hold the maximum ring size is requested to allow for
* resizing in later calls to the queue setup function.
*/
- mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
- dev->data->port_id, queue_id, socket_id,
- FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
+ mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
+ FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
+ socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
rte_free(q->sw_ring);
@@ -1327,11 +1299,8 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
return (-ENOMEM);
}
q->hw_ring = mz->addr;
-#ifdef RTE_LIBRTE_XEN_DOM0
q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
-#else
- q->hw_ring_phys_addr = mz->phys_addr;
-#endif
dev->data->rx_queues[queue_id] = q;
return 0;
@@ -1467,9 +1436,9 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
* enough to hold the maximum ring size is requested to allow for
* resizing in later calls to the queue setup function.
*/
- mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
- dev->data->port_id, queue_id, socket_id,
- FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
+ mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
+ FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
+ socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
rte_free(q->sw_ring);
@@ -1477,11 +1446,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
return (-ENOMEM);
}
q->hw_ring = mz->addr;
-#ifdef RTE_LIBRTE_XEN_DOM0
q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
-#else
- q->hw_ring_phys_addr = mz->phys_addr;
-#endif
/*
* allocate memory for the RS bit tracker. Enough slots to hold the
--
2.1.4
* Re: [dpdk-dev] [PATCH v2 0/6] Xen DOM0 runtime support
2015-10-23 6:34 [dpdk-dev] [PATCH v2 0/6] Xen DOM0 runtime support Stephen Hemminger
` (5 preceding siblings ...)
2015-10-23 6:34 ` [dpdk-dev] [PATCH 6/6] fm10k: " Stephen Hemminger
@ 2015-10-26 7:24 ` Liu, Jijiang
2015-11-03 0:40 ` Thomas Monjalon
7 siblings, 0 replies; 10+ messages in thread
From: Liu, Jijiang @ 2015-10-26 7:24 UTC (permalink / raw)
To: Stephen Hemminger, dev
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen
> Hemminger
> Sent: Friday, October 23, 2015 2:35 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v2 0/6] Xen DOM0 runtime support
>
> It should be possible to build a single application or library that will work
> in both Xen and non-Xen environments. Any special-case handling should be
> done at runtime.
>
> Rebased to current DPDK source.
>
> Stephen Hemminger (6):
> xen: allow determining DOM0 at runtime
> ethdev: add common function for reserving DMA regions
> e1000: use rte_eth_dma_zone_reserve
> ixgbe: use rte_eth_dma_zone_reserve
> i40e: use rte_eth_dma_zone_reserve
> fm10k: use rte_eth_dma_zone_reserve
>
> drivers/net/e1000/em_rxtx.c | 38 ++---------
> drivers/net/e1000/igb_rxtx.c | 41 ++----------
> drivers/net/fm10k/fm10k_ethdev.c | 47 ++------------
> drivers/net/i40e/i40e_ethdev.c | 9 ---
> drivers/net/i40e/i40e_fdir.c | 5 +-
> drivers/net/i40e/i40e_rxtx.c | 94 ++++++----------------------
> drivers/net/ixgbe/ixgbe_rxtx.c | 47 ++------------
> lib/librte_eal/common/include/rte_memory.h | 30 ++++++++-
> lib/librte_eal/linuxapp/eal/eal_memory.c | 7 +++
> lib/librte_eal/linuxapp/eal/eal_xen_memory.c | 2 +-
> lib/librte_ether/rte_ethdev.c | 24 +++++++
> lib/librte_ether/rte_ethdev.h | 23 +++++++
> lib/librte_mempool/rte_mempool.c | 48 +++++++++-----
> lib/librte_mempool/rte_mempool.h | 3 +-
> 14 files changed, 160 insertions(+), 258 deletions(-)
>
> --
> 2.1.4
Acked-by: Jijiang Liu <Jijiang.liu@intel.com>
Thanks
Jijiang Liu
* Re: [dpdk-dev] [PATCH v2 0/6] Xen DOM0 runtime support
2015-10-23 6:34 [dpdk-dev] [PATCH v2 0/6] Xen DOM0 runtime support Stephen Hemminger
` (6 preceding siblings ...)
2015-10-26 7:24 ` [dpdk-dev] [PATCH v2 0/6] Xen DOM0 runtime support Liu, Jijiang
@ 2015-11-03 0:40 ` Thomas Monjalon
7 siblings, 0 replies; 10+ messages in thread
From: Thomas Monjalon @ 2015-11-03 0:40 UTC (permalink / raw)
To: Stephen Hemminger; +Cc: dev
2015-10-22 23:34, Stephen Hemminger:
> It should be possible to build a single application or library
> that will work in both Xen and non-Xen environments. Any special-case
> handling should be done at runtime.
>
> Rebased to current DPDK source.
Sorry, it needs to be rebased again on top of Konstantin's changes.
* [dpdk-dev] [PATCH 5/6] i40e: use rte_eth_dma_zone_reserve
2015-09-29 0:44 [dpdk-dev] [PATCH " Stephen Hemminger
@ 2015-09-29 0:44 ` Stephen Hemminger
0 siblings, 0 replies; 10+ messages in thread
From: Stephen Hemminger @ 2015-09-29 0:44 UTC (permalink / raw)
To: dev
Handle Xen support at runtime.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
drivers/net/i40e/i40e_ethdev.c | 9 ----
drivers/net/i40e/i40e_fdir.c | 5 +--
drivers/net/i40e/i40e_rxtx.c | 94 ++++++++----------------------------------
3 files changed, 19 insertions(+), 89 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 2dd9fdc..4bf2a5f 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -2098,24 +2098,15 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
id++;
snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, id);
-#ifdef RTE_LIBRTE_XEN_DOM0
mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
alignment, RTE_PGSIZE_2M);
-#else
- mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY, 0,
- alignment);
-#endif
if (!mz)
return I40E_ERR_NO_MEMORY;
mem->id = id;
mem->size = size;
mem->va = mz->addr;
-#ifdef RTE_LIBRTE_XEN_DOM0
mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
-#else
- mem->pa = mz->phys_addr;
-#endif
return I40E_SUCCESS;
}
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index c9ce98f..ad64ff7 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -275,11 +275,8 @@ i40e_fdir_setup(struct i40e_pf *pf)
goto fail_mem;
}
pf->fdir.prg_pkt = mz->addr;
-#ifdef RTE_LIBRTE_XEN_DOM0
pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
-#else
- pf->fdir.dma_addr = (uint64_t)mz->phys_addr;
-#endif
+
pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
vsi->base_queue);
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index fd656d5..77b90fa 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -84,12 +84,6 @@
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
- const char *ring_name,
- uint16_t queue_id,
- uint32_t ring_size,
- int socket_id);
static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -2170,11 +2164,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
/* Allocate the maximun number of RX ring hardware descriptor. */
ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- rz = i40e_ring_dma_zone_reserve(dev,
- "rx_ring",
- queue_idx,
- ring_size,
- socket_id);
+ rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ ring_size, I40E_ALIGN, socket_id);
if (!rz) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
@@ -2184,12 +2175,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
/* Zero all the descriptors in the ring. */
memset(rz->addr, 0, ring_size);
-#ifdef RTE_LIBRTE_XEN_DOM0
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
- rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
-
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
@@ -2438,11 +2424,8 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
/* Allocate TX hardware ring descriptors. */
ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- tz = i40e_ring_dma_zone_reserve(dev,
- "tx_ring",
- queue_idx,
- ring_size,
- socket_id);
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ ring_size, I40E_ALIGN, socket_id);
if (!tz) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
@@ -2467,11 +2450,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
-#ifdef RTE_LIBRTE_XEN_DOM0
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
- txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/* Allocate software ring */
@@ -2518,47 +2497,22 @@ i40e_dev_tx_queue_release(void *txq)
rte_free(q);
}
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
- const char *ring_name,
- uint16_t queue_id,
- uint32_t ring_size,
- int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name,
- dev->data->port_id, queue_id);
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(z_name, ring_size,
- socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, I40E_ALIGN);
-#endif
-}
-
const struct rte_memzone *
i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
- const struct rte_memzone *mz = NULL;
+ const struct rte_memzone *mz;
mz = rte_memzone_lookup(name);
if (mz)
return mz;
-#ifdef RTE_LIBRTE_XEN_DOM0
- mz = rte_memzone_reserve_bounded(name, len,
- socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
-#else
- mz = rte_memzone_reserve_aligned(name, len,
+
+ if (is_xen_dom0_supported())
+ mz = rte_memzone_reserve_bounded(name, len,
+ socket_id, 0, I40E_ALIGN, RTE_PGSIZE_2M);
+ else
+ mz = rte_memzone_reserve_aligned(name, len,
socket_id, 0, I40E_ALIGN);
-#endif
+
return mz;
}
@@ -2971,11 +2925,9 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- tz = i40e_ring_dma_zone_reserve(dev,
- "fdir_tx_ring",
- I40E_FDIR_QUEUE_ID,
- ring_size,
- SOCKET_ID_ANY);
+ tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
+ I40E_FDIR_QUEUE_ID, ring_size,
+ I40E_ALIGN, SOCKET_ID_ANY);
if (!tz) {
i40e_dev_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
@@ -2987,11 +2939,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
txq->vsi = pf->fdir.fdir_vsi;
-#ifdef RTE_LIBRTE_XEN_DOM0
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
- txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/*
* don't need to allocate software ring and reset for the fdir
@@ -3031,11 +2979,9 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
- rz = i40e_ring_dma_zone_reserve(dev,
- "fdir_rx_ring",
- I40E_FDIR_QUEUE_ID,
- ring_size,
- SOCKET_ID_ANY);
+ rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
+ I40E_FDIR_QUEUE_ID, ring_size,
+ I40E_ALIGN, SOCKET_ID_ANY);
if (!rz) {
i40e_dev_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
@@ -3047,11 +2993,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
rxq->vsi = pf->fdir.fdir_vsi;
-#ifdef RTE_LIBRTE_XEN_DOM0
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
- rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
/*
--
2.1.4