@@ -615,9 +615,10 @@ dp_packet_set_size(struct dp_packet *b, uint32_t v)
* (and thus 'v') will always be <= UINT16_MAX; this means that there is no
* loss of accuracy in assigning 'v' to 'data_len'.
*/
- b->mbuf.data_len = (uint16_t)v; /* Current seg length. */
- b->mbuf.pkt_len = v; /* Total length of all segments linked to
- * this segment. */
+ /* Current seg length. */
+ b->mbuf.data_len += (uint16_t)(v - b->mbuf.pkt_len);
+ /* Total length of all segments linked to this segment. */
+ b->mbuf.pkt_len = v;
}
static inline uint16_t
@@ -2378,29 +2378,54 @@ netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
return true;
}
- mbuf->l2_len = (char *) dp_packet_l3(pkt) - (char *) dp_packet_eth(pkt);
- mbuf->l3_len = (char *) dp_packet_l4(pkt) - (char *) dp_packet_l3(pkt);
- mbuf->l4_len = 0;
- mbuf->outer_l2_len = 0;
- mbuf->outer_l3_len = 0;
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ if (mbuf->ol_flags &
+ (RTE_MBUF_F_TX_TUNNEL_GENEVE | RTE_MBUF_F_TX_TUNNEL_VXLAN)) {
+ mbuf->outer_l2_len = (char *) dp_packet_l3(pkt) -
+ (char *) dp_packet_eth(pkt);
+ mbuf->outer_l3_len = (char *) dp_packet_l4(pkt) -
+ (char *) dp_packet_l3(pkt);
+ } else {
+ mbuf->l2_len = (char *) dp_packet_l3(pkt) -
+ (char *) dp_packet_eth(pkt);
+ mbuf->l3_len = (char *) dp_packet_l4(pkt) -
+ (char *) dp_packet_l3(pkt);
+ mbuf->outer_l2_len = 0;
+ mbuf->outer_l3_len = 0;
+ }
+ }
if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
struct tcp_header *th = dp_packet_l4(pkt);
if (!th) {
- VLOG_WARN_RL(&rl, "%s: TCP Segmentation without L4 header"
- " pkt len: %"PRIu32"", dev->up.name, mbuf->pkt_len);
+            VLOG_WARN_RL(&rl,
+                "%s: TCP Segmentation without L4 header, pkt len: %"PRIu32"",
+                dev->up.name, mbuf->pkt_len);
return false;
}
- mbuf->l4_len = TCP_OFFSET(th->tcp_ctl) * 4;
- mbuf->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
- mbuf->tso_segsz = dev->mtu - mbuf->l3_len - mbuf->l4_len;
+ if (mbuf->ol_flags & (RTE_MBUF_F_TX_TUNNEL_GENEVE |
+ RTE_MBUF_F_TX_TUNNEL_VXLAN)) {
+ mbuf->tso_segsz = dev->mtu - mbuf->l2_len - mbuf->l3_len -
+ mbuf->l4_len - mbuf->outer_l3_len;
+ } else {
+ mbuf->l4_len = TCP_OFFSET(th->tcp_ctl) * 4;
+ mbuf->tso_segsz = dev->mtu - mbuf->l3_len - mbuf->l4_len;
+ }
+
+ mbuf->ol_flags &= (~RTE_MBUF_F_TX_TCP_CKSUM);
if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV4) {
mbuf->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
}
}
+
+    /* With L4 (TCP/UDP) checksum offload the IP checksum field is left at
+     * zero by the stack, so request IP checksum offload too (IPv4 only). */
+ if ((mbuf->ol_flags & (RTE_MBUF_F_TX_TCP_CKSUM | RTE_MBUF_F_TX_UDP_CKSUM))
+ && !(mbuf->ol_flags & RTE_MBUF_F_TX_IPV6))
+ mbuf->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
+
return true;
}
@@ -184,6 +184,15 @@ netdev_tnl_push_ip_header(struct dp_packet *packet, const void *header,
dp_packet_ol_reset_ip_csum_good(packet);
*ip_tot_size -= IP_HEADER_LEN;
packet->l4_ofs = dp_packet_size(packet) - *ip_tot_size;
+
+ if (packet->mbuf.ol_flags &
+ (RTE_MBUF_F_TX_TUNNEL_GENEVE | RTE_MBUF_F_TX_TUNNEL_VXLAN)) {
+ packet->mbuf.ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4;
+ packet->mbuf.ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
+ } else {
+ ip->ip_csum = recalc_csum16(ip->ip_csum, 0, ip->ip_tot_len);
+ }
+
return ip + 1;
}
}
@@ -232,6 +241,45 @@ netdev_tnl_push_udp_header(const struct netdev *netdev OVS_UNUSED,
{
struct udp_header *udp;
int ip_tot_size;
+ uint8_t opt_len = 0;
+ struct eth_header *eth;
+ struct ip_header *ip;
+ struct genevehdr *gnh;
+
+ if (dp_packet_hwol_l4_mask(packet)) {
+ struct ip_header *ip = dp_packet_l3(packet);
+
+ if (ip->ip_proto == IPPROTO_TCP) {
+ struct tcp_header *th = dp_packet_l4(packet);
+
+ packet->mbuf.l4_len = TCP_OFFSET(th->tcp_ctl) * 4;
+ } else if (ip->ip_proto == IPPROTO_UDP)
+ packet->mbuf.l4_len = UDP_HEADER_LEN;
+
+ packet->mbuf.l3_len = (char *) dp_packet_l4(packet) -
+ (char *) dp_packet_l3(packet);
+
+ if ((packet->mbuf.ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
+ !(packet->mbuf.ol_flags & RTE_MBUF_F_TX_IPV6))
+ packet->mbuf.ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
+
+ if (!strcmp(netdev_get_type(netdev), "geneve")) {
+ eth = (data->header);
+ ip = eth + 1;
+ udp = ip + 1;
+ gnh = udp + 1;
+ opt_len = gnh->opt_len * 4;
+ packet->mbuf.ol_flags |= RTE_MBUF_F_TX_TUNNEL_GENEVE;
+ packet->mbuf.l2_len = (char *) dp_packet_l3(packet) -
+ (char *) dp_packet_eth(packet) +
+ GENEVE_BASE_HLEN + opt_len;
+ } else if (!strcmp(netdev_get_type(netdev), "vxlan")) {
+ packet->mbuf.ol_flags |= RTE_MBUF_F_TX_TUNNEL_VXLAN;
+ packet->mbuf.l2_len = (char *) dp_packet_l3(packet) -
+ (char *) dp_packet_eth(packet) +
+ VXLAN_HLEN;
+ }
+ }
udp = netdev_tnl_push_ip_header(packet, data->header, data->header_len,
&ip_tot_size, 0);
@@ -953,13 +953,16 @@ netdev_push_header(const struct netdev *netdev,
size_t i, size = dp_packet_batch_size(batch);
DP_PACKET_BATCH_REFILL_FOR_EACH (i, size, packet, batch) {
- if (OVS_UNLIKELY(dp_packet_hwol_is_tso(packet))) {
- COVERAGE_INC(netdev_push_header_drops);
- dp_packet_delete(packet);
- VLOG_WARN_RL(&rl, "%s: Tunneling packets with TSO is "
- "not supported: packet dropped",
- netdev_get_name(netdev));
- } else {
+ if (OVS_UNLIKELY(strcmp(netdev_get_type(netdev), "vxlan") &&
+ strcmp(netdev_get_type(netdev), "geneve") &&
+ (dp_packet_hwol_l4_mask(packet) || dp_packet_hwol_is_tso(packet)))) {
+ COVERAGE_INC(netdev_push_header_drops);
+ dp_packet_delete(packet);
+            VLOG_WARN_RL(&rl,
+                "%s: Tunneling packets with csum or tso HW offload flags "
+                "is not supported: packet dropped",
+                netdev_get_name(netdev));
+ } else {
/* The packet is going to be encapsulated and there is
* no support yet for inner network header csum offloading. */
dp_packet_ol_send_prepare(packet, 0);
Signed-off-by: Dexia Li <dexia.li@jaguarmicro.com>
---
 lib/dp-packet.h         |  7 +++---
 lib/netdev-dpdk.c       | 45 +++++++++++++++++++++++++++++---------
 lib/netdev-native-tnl.c | 48 +++++++++++++++++++++++++++++++++++++++++
 lib/netdev.c            | 17 +++++++++------
 4 files changed, 97 insertions(+), 20 deletions(-)