* [dpdk-dev] [PATCH] kni: add chained mbufs support
@ 2016-04-25 16:11 Ferruh Yigit
2016-04-26 6:49 ` Zhang, Helin
0 siblings, 1 reply; 6+ messages in thread
From: Ferruh Yigit @ 2016-04-25 16:11 UTC (permalink / raw)
To: dev; +Cc: Helin Zhang, Ferruh Yigit
rx_q fifo may have chained mbufs; merge them into a single skb before
handing it to the network stack.
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
.../linuxapp/eal/include/exec-env/rte_kni_common.h | 4 +-
lib/librte_eal/linuxapp/kni/kni_net.c | 83 ++++++++++++++++------
2 files changed, 64 insertions(+), 23 deletions(-)
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
index 7e5e598..2acdfd9 100644
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
@@ -113,7 +113,9 @@ struct rte_kni_mbuf {
void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
char pad0[10];
uint16_t data_off; /**< Start address of data in segment buffer. */
- char pad1[4];
+ char pad1[2];
+ uint8_t nb_segs; /**< Number of segments. */
+ char pad4[1];
uint64_t ol_flags; /**< Offload features. */
char pad2[4];
uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
diff --git a/lib/librte_eal/linuxapp/kni/kni_net.c b/lib/librte_eal/linuxapp/kni/kni_net.c
index cfa8339..570de71 100644
--- a/lib/librte_eal/linuxapp/kni/kni_net.c
+++ b/lib/librte_eal/linuxapp/kni/kni_net.c
@@ -156,7 +156,8 @@ kni_net_rx_normal(struct kni_dev *kni)
/* Transfer received packets to netif */
for (i = 0; i < num_rx; i++) {
kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
- len = kva->data_len;
+ len = kva->pkt_len;
+
data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va
+ kni->mbuf_kva;
@@ -165,22 +166,41 @@ kni_net_rx_normal(struct kni_dev *kni)
KNI_ERR("Out of mem, dropping pkts\n");
/* Update statistics */
kni->stats.rx_dropped++;
+ continue;
}
- else {
- /* Align IP on 16B boundary */
- skb_reserve(skb, 2);
+
+ /* Align IP on 16B boundary */
+ skb_reserve(skb, 2);
+
+ if (kva->nb_segs == 0) {
memcpy(skb_put(skb, len), data_kva, len);
- skb->dev = dev;
- skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ int nb_segs;
+ int kva_nb_segs = kva->nb_segs;
- /* Call netif interface */
- netif_rx_ni(skb);
+ for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
+ memcpy(skb_put(skb, kva->data_len),
+ data_kva, kva->data_len);
- /* Update statistics */
- kni->stats.rx_bytes += len;
- kni->stats.rx_packets++;
+ if (!kva->next)
+ break;
+
+ kva = kva->next - kni->mbuf_va + kni->mbuf_kva;
+ data_kva = kva->buf_addr + kva->data_off
+ - kni->mbuf_va + kni->mbuf_kva;
+ }
}
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* Call netif interface */
+ netif_rx_ni(skb);
+
+ /* Update statistics */
+ kni->stats.rx_bytes += len;
+ kni->stats.rx_packets++;
}
/* Burst enqueue mbufs into free_q */
@@ -317,7 +337,7 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
/* Copy mbufs to sk buffer and then call tx interface */
for (i = 0; i < num; i++) {
kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
- len = kva->data_len;
+ len = kva->pkt_len;
data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va +
kni->mbuf_kva;
@@ -338,20 +358,39 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
if (skb == NULL) {
KNI_ERR("Out of mem, dropping pkts\n");
kni->stats.rx_dropped++;
+ continue;
}
- else {
- /* Align IP on 16B boundary */
- skb_reserve(skb, 2);
+
+ /* Align IP on 16B boundary */
+ skb_reserve(skb, 2);
+
+ if (kva->nb_segs == 0) {
memcpy(skb_put(skb, len), data_kva, len);
- skb->dev = dev;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ int nb_segs;
+ int kva_nb_segs = kva->nb_segs;
- kni->stats.rx_bytes += len;
- kni->stats.rx_packets++;
+ for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
+ memcpy(skb_put(skb, kva->data_len),
+ data_kva, kva->data_len);
+
+ if (!kva->next)
+ break;
- /* call tx interface */
- kni_net_tx(skb, dev);
+ kva = kva->next - kni->mbuf_va + kni->mbuf_kva;
+ data_kva = kva->buf_addr + kva->data_off
+ - kni->mbuf_va + kni->mbuf_kva;
+ }
}
+
+ skb->dev = dev;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ kni->stats.rx_bytes += len;
+ kni->stats.rx_packets++;
+
+ /* call tx interface */
+ kni_net_tx(skb, dev);
}
/* enqueue all the mbufs from rx_q into free_q */
--
2.5.5
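The core of the change is the segment walk in the rx path: every entry pulled
from rx_q is an mbuf address in the application's address space, and each
pointer taken from it (the mbuf itself, its buf_addr, its next segment) has to
be rebased onto the kernel mapping before it is dereferenced. Below is a
simplified sketch of that walk, not the patch code itself: va2kva() and
merge_mbuf_chain() are hypothetical names, and the real kni_net.c open-codes
the translation and bounds the loop with kva->nb_segs instead of trusting the
next pointer alone.

static void *va2kva(struct kni_dev *kni, void *va)
{
	/* mbufs live in one contiguous mapping, so rebase the
	 * userspace pointer onto the kernel-side mapping */
	return va - kni->mbuf_va + kni->mbuf_kva;
}

static struct sk_buff *merge_mbuf_chain(struct kni_dev *kni, void *va)
{
	struct rte_kni_mbuf *kva = va2kva(kni, va);
	struct sk_buff *skb = dev_alloc_skb(kva->pkt_len + 2);

	if (!skb)
		return NULL;
	skb_reserve(skb, 2);	/* align IP on 16B boundary */

	while (kva) {
		void *data = va2kva(kni, kva->buf_addr) + kva->data_off;

		memcpy(skb_put(skb, kva->data_len), data, kva->data_len);
		kva = kva->next ? va2kva(kni, kva->next) : NULL;
	}
	return skb;
}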
* Re: [dpdk-dev] [PATCH] kni: add chained mbufs support
2016-04-25 16:11 [dpdk-dev] [PATCH] kni: add chained mbufs support Ferruh Yigit
@ 2016-04-26 6:49 ` Zhang, Helin
2016-04-26 8:05 ` Ferruh Yigit
0 siblings, 1 reply; 6+ messages in thread
From: Zhang, Helin @ 2016-04-26 6:49 UTC (permalink / raw)
To: Yigit, Ferruh; +Cc: dev
Have you tested it?
I think we need to test it for a longer time, e.g. 1 hour.
My comments are inlined.
Thanks,
Helin
> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Tuesday, April 26, 2016 12:11 AM
> To: dev@dpdk.org
> Cc: Zhang, Helin; Yigit, Ferruh
> Subject: [PATCH] kni: add chained mbufs support
>
> rx_q fifo may have chained mbufs; merge them into a single skb before
> handing it to the network stack.
>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> ---
> .../linuxapp/eal/include/exec-env/rte_kni_common.h | 4 +-
> lib/librte_eal/linuxapp/kni/kni_net.c | 83 ++++++++++++++++------
> 2 files changed, 64 insertions(+), 23 deletions(-)
>
> diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
> b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
> index 7e5e598..2acdfd9 100644
> --- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
> +++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
> @@ -113,7 +113,9 @@ struct rte_kni_mbuf {
> void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
> char pad0[10];
> uint16_t data_off; /**< Start address of data in segment buffer. */
> - char pad1[4];
> + char pad1[2];
> + uint8_t nb_segs; /**< Number of segments. */
> + char pad4[1];
> uint64_t ol_flags; /**< Offload features. */
> char pad2[4];
> uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
> diff --git a/lib/librte_eal/linuxapp/kni/kni_net.c
> b/lib/librte_eal/linuxapp/kni/kni_net.c
> index cfa8339..570de71 100644
> --- a/lib/librte_eal/linuxapp/kni/kni_net.c
> +++ b/lib/librte_eal/linuxapp/kni/kni_net.c
> @@ -156,7 +156,8 @@ kni_net_rx_normal(struct kni_dev *kni)
> /* Transfer received packets to netif */
> for (i = 0; i < num_rx; i++) {
> kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
> - len = kva->data_len;
> + len = kva->pkt_len;
> +
> data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va
> + kni->mbuf_kva;
>
> @@ -165,22 +166,41 @@ kni_net_rx_normal(struct kni_dev *kni)
> KNI_ERR("Out of mem, dropping pkts\n");
> /* Update statistics */
> kni->stats.rx_dropped++;
> + continue;
> }
> - else {
> - /* Align IP on 16B boundary */
> - skb_reserve(skb, 2);
> +
> + /* Align IP on 16B boundary */
> + skb_reserve(skb, 2);
> +
> + if (kva->nb_segs == 0) {
I guess it should compare nb_segs with 1, not 0. Am I wrong?
> memcpy(skb_put(skb, len), data_kva, len);
> - skb->dev = dev;
> - skb->protocol = eth_type_trans(skb, dev);
> - skb->ip_summed = CHECKSUM_UNNECESSARY;
> + } else {
> + int nb_segs;
> + int kva_nb_segs = kva->nb_segs;
>
> - /* Call netif interface */
> - netif_rx_ni(skb);
> + for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++)
kva_nb_segs might not be needed at all; use kva->nb_segs directly?
> {
> + memcpy(skb_put(skb, kva->data_len),
> + data_kva, kva->data_len);
>
> - /* Update statistics */
> - kni->stats.rx_bytes += len;
> - kni->stats.rx_packets++;
> + if (!kva->next)
> + break;
> +
> + kva = kva->next - kni->mbuf_va + kni->mbuf_kva;
> + data_kva = kva->buf_addr + kva->data_off
> + - kni->mbuf_va + kni->mbuf_kva;
> + }
> }
> +
> + skb->dev = dev;
> + skb->protocol = eth_type_trans(skb, dev);
> + skb->ip_summed = CHECKSUM_UNNECESSARY;
> +
> + /* Call netif interface */
> + netif_rx_ni(skb);
> +
> + /* Update statistics */
> + kni->stats.rx_bytes += len;
> + kni->stats.rx_packets++;
> }
>
> /* Burst enqueue mbufs into free_q */
> @@ -317,7 +337,7 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
> /* Copy mbufs to sk buffer and then call tx interface */
> for (i = 0; i < num; i++) {
> kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
> - len = kva->data_len;
> + len = kva->pkt_len;
> data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va +
> kni->mbuf_kva;
>
> @@ -338,20 +358,39 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
> if (skb == NULL) {
> KNI_ERR("Out of mem, dropping pkts\n");
> kni->stats.rx_dropped++;
> + continue;
> }
> - else {
> - /* Align IP on 16B boundary */
> - skb_reserve(skb, 2);
> +
> + /* Align IP on 16B boundary */
> + skb_reserve(skb, 2);
> +
> + if (kva->nb_segs == 0) {
The same comments as above.
> memcpy(skb_put(skb, len), data_kva, len);
> - skb->dev = dev;
> - skb->ip_summed = CHECKSUM_UNNECESSARY;
> + } else {
> + int nb_segs;
> + int kva_nb_segs = kva->nb_segs;
>
> - kni->stats.rx_bytes += len;
> - kni->stats.rx_packets++;
> + for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++)
The same comments as above.
> {
> + memcpy(skb_put(skb, kva->data_len),
> + data_kva, kva->data_len);
> +
> + if (!kva->next)
> + break;
>
> - /* call tx interface */
> - kni_net_tx(skb, dev);
> + kva = kva->next - kni->mbuf_va + kni->mbuf_kva;
> + data_kva = kva->buf_addr + kva->data_off
> + - kni->mbuf_va + kni->mbuf_kva;
> + }
> }
> +
> + skb->dev = dev;
> + skb->ip_summed = CHECKSUM_UNNECESSARY;
> +
> + kni->stats.rx_bytes += len;
> + kni->stats.rx_packets++;
> +
> + /* call tx interface */
> + kni_net_tx(skb, dev);
> }
>
> /* enqueue all the mbufs from rx_q into free_q */
> --
> 2.5.5
* Re: [dpdk-dev] [PATCH] kni: add chained mbufs support
2016-04-26 6:49 ` Zhang, Helin
@ 2016-04-26 8:05 ` Ferruh Yigit
2016-04-26 12:37 ` [dpdk-dev] [PATCH v2] " Ferruh Yigit
0 siblings, 1 reply; 6+ messages in thread
From: Ferruh Yigit @ 2016-04-26 8:05 UTC (permalink / raw)
To: Zhang, Helin; +Cc: dev
On 4/26/2016 7:49 AM, Zhang, Helin wrote:
> Have you tested it?
Yes, it has been tested.
> I think we need to test it for a longer time, e.g. 1 hour.
I will run a longevity test before sending the next patch.
> My comments are inlined.
>
> Thanks,
> Helin
>
>> -----Original Message-----
>> From: Yigit, Ferruh
>> Sent: Tuesday, April 26, 2016 12:11 AM
>> To: dev@dpdk.org
>> Cc: Zhang, Helin; Yigit, Ferruh
>> Subject: [PATCH] kni: add chained mbufs support
>>
>> rx_q fifo may have chained mbufs; merge them into a single skb before
>> handing it to the network stack.
>>
>> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
>> ---
>> .../linuxapp/eal/include/exec-env/rte_kni_common.h | 4 +-
>> lib/librte_eal/linuxapp/kni/kni_net.c | 83 ++++++++++++++++------
>> 2 files changed, 64 insertions(+), 23 deletions(-)
>>
>> diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
>> b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
>> index 7e5e598..2acdfd9 100644
>> --- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
>> +++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
>> @@ -113,7 +113,9 @@ struct rte_kni_mbuf {
>> void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
>> char pad0[10];
>> uint16_t data_off; /**< Start address of data in segment buffer. */
>> - char pad1[4];
>> + char pad1[2];
>> + uint8_t nb_segs; /**< Number of segments. */
>> + char pad4[1];
>> uint64_t ol_flags; /**< Offload features. */
>> char pad2[4];
>> uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
>> diff --git a/lib/librte_eal/linuxapp/kni/kni_net.c
>> b/lib/librte_eal/linuxapp/kni/kni_net.c
>> index cfa8339..570de71 100644
>> --- a/lib/librte_eal/linuxapp/kni/kni_net.c
>> +++ b/lib/librte_eal/linuxapp/kni/kni_net.c
>> @@ -156,7 +156,8 @@ kni_net_rx_normal(struct kni_dev *kni)
>> /* Transfer received packets to netif */
>> for (i = 0; i < num_rx; i++) {
>> kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
>> - len = kva->data_len;
>> + len = kva->pkt_len;
>> +
>> data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va
>> + kni->mbuf_kva;
>>
>> @@ -165,22 +166,41 @@ kni_net_rx_normal(struct kni_dev *kni)
>> KNI_ERR("Out of mem, dropping pkts\n");
>> /* Update statistics */
>> kni->stats.rx_dropped++;
>> + continue;
>> }
>> - else {
>> - /* Align IP on 16B boundary */
>> - skb_reserve(skb, 2);
>> +
>> + /* Align IP on 16B boundary */
>> + skb_reserve(skb, 2);
>> +
>> + if (kva->nb_segs == 0) {
> I guess it should compare nb_segs with 1, not 0. Am I wrong?
>
Right, this needs to be 1: a packet mbuf always has at least one segment, so
nb_segs is never 0. I will send a new patch.
>> memcpy(skb_put(skb, len), data_kva, len);
>> - skb->dev = dev;
>> - skb->protocol = eth_type_trans(skb, dev);
>> - skb->ip_summed = CHECKSUM_UNNECESSARY;
>> + } else {
>> + int nb_segs;
>> + int kva_nb_segs = kva->nb_segs;
>>
>> - /* Call netif interface */
>> - netif_rx_ni(skb);
>> + for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++)
> kva_nb_segs might not be needed at all; use kva->nb_segs directly?
>
It is needed: kva is reassigned inside the loop as it advances to the next
segment, so the segment count of the first mbuf has to be saved before the
loop starts.
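To illustrate the point (a hypothetical variant, not the patch code): if the
loop bound re-read kva->nb_segs, it would evaluate the field of whichever
segment kva currently points at, and segments after the head typically carry
nb_segs == 1, so the walk would stop after copying only the first segment:

	for (nb_segs = 0; nb_segs < kva->nb_segs; nb_segs++) {	/* wrong bound */
		memcpy(skb_put(skb, kva->data_len), data_kva, kva->data_len);
		if (!kva->next)
			break;
		kva = kva->next - kni->mbuf_va + kni->mbuf_kva;
		data_kva = kva->buf_addr + kva->data_off
			- kni->mbuf_va + kni->mbuf_kva;
		/* the next check now compares against this segment's
		 * nb_segs, not the head mbuf's */
	}

Saving the head's count in kva_nb_segs before the loop avoids this.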
>> {
>> + memcpy(skb_put(skb, kva->data_len),
>> + data_kva, kva->data_len);
>>
>> - /* Update statistics */
>> - kni->stats.rx_bytes += len;
>> - kni->stats.rx_packets++;
>> + if (!kva->next)
>> + break;
>> +
>> + kva = kva->next - kni->mbuf_va + kni->mbuf_kva;
>> + data_kva = kva->buf_addr + kva->data_off
>> + - kni->mbuf_va + kni->mbuf_kva;
>> + }
>> }
>> +
>> + skb->dev = dev;
>> + skb->protocol = eth_type_trans(skb, dev);
>> + skb->ip_summed = CHECKSUM_UNNECESSARY;
>> +
>> + /* Call netif interface */
>> + netif_rx_ni(skb);
>> +
>> + /* Update statistics */
>> + kni->stats.rx_bytes += len;
>> + kni->stats.rx_packets++;
>> }
>>
>> /* Burst enqueue mbufs into free_q */
>> @@ -317,7 +337,7 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
>> /* Copy mbufs to sk buffer and then call tx interface */
>> for (i = 0; i < num; i++) {
>> kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
>> - len = kva->data_len;
>> + len = kva->pkt_len;
>> data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va +
>> kni->mbuf_kva;
>>
>> @@ -338,20 +358,39 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
>> if (skb == NULL) {
>> KNI_ERR("Out of mem, dropping pkts\n");
>> kni->stats.rx_dropped++;
>> + continue;
>> }
>> - else {
>> - /* Align IP on 16B boundary */
>> - skb_reserve(skb, 2);
>> +
>> + /* Align IP on 16B boundary */
>> + skb_reserve(skb, 2);
>> +
>> + if (kva->nb_segs == 0) {
> The same comments as above.
>
>> memcpy(skb_put(skb, len), data_kva, len);
>> - skb->dev = dev;
>> - skb->ip_summed = CHECKSUM_UNNECESSARY;
>> + } else {
>> + int nb_segs;
>> + int kva_nb_segs = kva->nb_segs;
>>
>> - kni->stats.rx_bytes += len;
>> - kni->stats.rx_packets++;
>> + for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++)
> The same comments as above.
>> {
>> + memcpy(skb_put(skb, kva->data_len),
>> + data_kva, kva->data_len);
>> +
>> + if (!kva->next)
>> + break;
>>
>> - /* call tx interface */
>> - kni_net_tx(skb, dev);
>> + kva = kva->next - kni->mbuf_va + kni->mbuf_kva;
>> + data_kva = kva->buf_addr + kva->data_off
>> + - kni->mbuf_va + kni->mbuf_kva;
>> + }
>> }
>> +
>> + skb->dev = dev;
>> + skb->ip_summed = CHECKSUM_UNNECESSARY;
>> +
>> + kni->stats.rx_bytes += len;
>> + kni->stats.rx_packets++;
>> +
>> + /* call tx interface */
>> + kni_net_tx(skb, dev);
>> }
>>
>> /* enqueue all the mbufs from rx_q into free_q */
>> --
>> 2.5.5
>
* [dpdk-dev] [PATCH v2] kni: add chained mbufs support
2016-04-26 8:05 ` Ferruh Yigit
@ 2016-04-26 12:37 ` Ferruh Yigit
2016-04-27 2:25 ` Zhang, Helin
0 siblings, 1 reply; 6+ messages in thread
From: Ferruh Yigit @ 2016-04-26 12:37 UTC (permalink / raw)
To: dev; +Cc: Helin Zhang, Ferruh Yigit
rx_q fifo may have chained mbufs; merge them into a single skb before
handing it to the network stack.
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
.../linuxapp/eal/include/exec-env/rte_kni_common.h | 4 +-
lib/librte_eal/linuxapp/kni/kni_net.c | 83 ++++++++++++++++------
2 files changed, 64 insertions(+), 23 deletions(-)
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
index 7e5e598..2acdfd9 100644
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
@@ -113,7 +113,9 @@ struct rte_kni_mbuf {
void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
char pad0[10];
uint16_t data_off; /**< Start address of data in segment buffer. */
- char pad1[4];
+ char pad1[2];
+ uint8_t nb_segs; /**< Number of segments. */
+ char pad4[1];
uint64_t ol_flags; /**< Offload features. */
char pad2[4];
uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
diff --git a/lib/librte_eal/linuxapp/kni/kni_net.c b/lib/librte_eal/linuxapp/kni/kni_net.c
index cfa8339..44f49cc 100644
--- a/lib/librte_eal/linuxapp/kni/kni_net.c
+++ b/lib/librte_eal/linuxapp/kni/kni_net.c
@@ -156,7 +156,8 @@ kni_net_rx_normal(struct kni_dev *kni)
/* Transfer received packets to netif */
for (i = 0; i < num_rx; i++) {
kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
- len = kva->data_len;
+ len = kva->pkt_len;
+
data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va
+ kni->mbuf_kva;
@@ -165,22 +166,41 @@ kni_net_rx_normal(struct kni_dev *kni)
KNI_ERR("Out of mem, dropping pkts\n");
/* Update statistics */
kni->stats.rx_dropped++;
+ continue;
}
- else {
- /* Align IP on 16B boundary */
- skb_reserve(skb, 2);
+
+ /* Align IP on 16B boundary */
+ skb_reserve(skb, 2);
+
+ if (kva->nb_segs == 1) {
memcpy(skb_put(skb, len), data_kva, len);
- skb->dev = dev;
- skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ int nb_segs;
+ int kva_nb_segs = kva->nb_segs;
- /* Call netif interface */
- netif_rx_ni(skb);
+ for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
+ memcpy(skb_put(skb, kva->data_len),
+ data_kva, kva->data_len);
- /* Update statistics */
- kni->stats.rx_bytes += len;
- kni->stats.rx_packets++;
+ if (!kva->next)
+ break;
+
+ kva = kva->next - kni->mbuf_va + kni->mbuf_kva;
+ data_kva = kva->buf_addr + kva->data_off
+ - kni->mbuf_va + kni->mbuf_kva;
+ }
}
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* Call netif interface */
+ netif_rx_ni(skb);
+
+ /* Update statistics */
+ kni->stats.rx_bytes += len;
+ kni->stats.rx_packets++;
}
/* Burst enqueue mbufs into free_q */
@@ -317,7 +337,7 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
/* Copy mbufs to sk buffer and then call tx interface */
for (i = 0; i < num; i++) {
kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
- len = kva->data_len;
+ len = kva->pkt_len;
data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va +
kni->mbuf_kva;
@@ -338,20 +358,39 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
if (skb == NULL) {
KNI_ERR("Out of mem, dropping pkts\n");
kni->stats.rx_dropped++;
+ continue;
}
- else {
- /* Align IP on 16B boundary */
- skb_reserve(skb, 2);
+
+ /* Align IP on 16B boundary */
+ skb_reserve(skb, 2);
+
+ if (kva->nb_segs == 1) {
memcpy(skb_put(skb, len), data_kva, len);
- skb->dev = dev;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ int nb_segs;
+ int kva_nb_segs = kva->nb_segs;
- kni->stats.rx_bytes += len;
- kni->stats.rx_packets++;
+ for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
+ memcpy(skb_put(skb, kva->data_len),
+ data_kva, kva->data_len);
+
+ if (!kva->next)
+ break;
- /* call tx interface */
- kni_net_tx(skb, dev);
+ kva = kva->next - kni->mbuf_va + kni->mbuf_kva;
+ data_kva = kva->buf_addr + kva->data_off
+ - kni->mbuf_va + kni->mbuf_kva;
+ }
}
+
+ skb->dev = dev;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ kni->stats.rx_bytes += len;
+ kni->stats.rx_packets++;
+
+ /* call tx interface */
+ kni_net_tx(skb, dev);
}
/* enqueue all the mbufs from rx_q into free_q */
--
2.5.5
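For context, the chained mbufs this patch handles are produced whenever the
DPDK application enqueues a multi-segment packet to KNI. A minimal userspace
sketch of how such a packet comes about (assuming a struct rte_kni *kni and a
mempool already set up with rte_kni_alloc() and rte_pktmbuf_pool_create();
payload contents omitted):

#include <rte_mbuf.h>
#include <rte_kni.h>

static void send_chained_packet(struct rte_kni *kni, struct rte_mempool *mp)
{
	struct rte_mbuf *head = rte_pktmbuf_alloc(mp);
	struct rte_mbuf *tail = rte_pktmbuf_alloc(mp);

	if (head == NULL || tail == NULL) {
		rte_pktmbuf_free(head);
		rte_pktmbuf_free(tail);
		return;
	}

	/* reserve some payload space in each segment */
	rte_pktmbuf_append(head, 1000);
	rte_pktmbuf_append(tail, 500);

	/* link the segments: head->nb_segs becomes 2, pkt_len 1500 */
	rte_pktmbuf_chain(head, tail);

	/* the KNI kernel thread now sees nb_segs > 1 on rx_q and, with
	 * this patch, merges both segments into a single skb */
	if (rte_kni_tx_burst(kni, &head, 1) == 0)
		rte_pktmbuf_free(head);
}

The nb_segs field added to struct rte_kni_mbuf above is what lets the kernel
side distinguish the single-segment fast path from the chain walk.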
* Re: [dpdk-dev] [PATCH v2] kni: add chained mbufs support
2016-04-26 12:37 ` [dpdk-dev] [PATCH v2] " Ferruh Yigit
@ 2016-04-27 2:25 ` Zhang, Helin
2016-04-29 14:29 ` Thomas Monjalon
0 siblings, 1 reply; 6+ messages in thread
From: Zhang, Helin @ 2016-04-27 2:25 UTC (permalink / raw)
To: Yigit, Ferruh, dev
> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Tuesday, April 26, 2016 8:38 PM
> To: dev@dpdk.org
> Cc: Zhang, Helin <helin.zhang@intel.com>; Yigit, Ferruh <ferruh.yigit@intel.com>
> Subject: [PATCH v2] kni: add chained mbufs support
>
> rx_q fifo may have chained mbufs; merge them into a single skb before handing
> it to the network stack.
>
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Helin Zhang <helin.zhang@intel.com>
* Re: [dpdk-dev] [PATCH v2] kni: add chained mbufs support
2016-04-27 2:25 ` Zhang, Helin
@ 2016-04-29 14:29 ` Thomas Monjalon
0 siblings, 0 replies; 6+ messages in thread
From: Thomas Monjalon @ 2016-04-29 14:29 UTC (permalink / raw)
To: Yigit, Ferruh; +Cc: dev, Zhang, Helin
> > rx_q fifo may have chained mbufs; merge them into a single skb before handing
> > it to the network stack.
> >
> > Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> Acked-by: Helin Zhang <helin.zhang@intel.com>
Applied, thanks
Thread overview: 6+ messages
2016-04-25 16:11 [dpdk-dev] [PATCH] kni: add chained mbufs support Ferruh Yigit
2016-04-26 6:49 ` Zhang, Helin
2016-04-26 8:05 ` Ferruh Yigit
2016-04-26 12:37 ` [dpdk-dev] [PATCH v2] " Ferruh Yigit
2016-04-27 2:25 ` Zhang, Helin
2016-04-29 14:29 ` Thomas Monjalon