diff --git a/acinclude.m4 b/acinclude.m4
--- a/acinclude.m4
+++ b/acinclude.m4
@@ -508,6 +508,14 @@ AC_DEFUN([OVS_CHECK_LINUX_COMPAT], [
OVS_GREP_IFELSE([$KSRC/include/linux/utsrelease.h], [el6],
[OVS_DEFINE([HAVE_RHEL6_PER_CPU])])
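+ dnl Kernels 3.10 through 4.2 need the local IPv4 reassembly backport;
+ dnl later kernels already carry the required fragmentation fixes.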
+ if test "$version" = 4 && test "$patchlevel" -le 2; then
+ OVS_DEFINE([OVS_FRAGMENT_BACKPORT])
+ elif test "$version" = 3 && test "$patchlevel" -ge 10; then
+ OVS_DEFINE([OVS_FRAGMENT_BACKPORT])
+ fi
+
if cmp -s datapath/linux/kcompat.h.new \
datapath/linux/kcompat.h >/dev/null 2>&1; then
rm datapath/linux/kcompat.h.new
diff --git a/datapath/compat.h b/datapath/compat.h
--- a/datapath/compat.h
+++ b/datapath/compat.h
@@ -22,6 +22,7 @@
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/netlink.h>
+#include <net/ip.h>
#include <net/route.h>
#include <net/xfrm.h>
@@ -52,4 +53,20 @@ static inline bool skb_encapsulation(struct sk_buff *skb)
#define skb_encapsulation(skb) false
#endif
+#ifdef OVS_FRAGMENT_BACKPORT
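+/* Hook the fragment reassembly backport into module init/exit; on
+ * kernels that do not need the backport, the stubs below are no-ops. */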
+static inline int __init compat_init(void)
+{
+ return ipfrag_init();
+}
+static inline void compat_exit(void)
+{
+ ipfrag_fini();
+}
+#else
+static inline int __init compat_init(void) { return 0; }
+static inline void compat_exit(void) { }
+#endif
+
#endif /* compat.h */
diff --git a/datapath/datapath.c b/datapath/datapath.c
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -2288,10 +2288,14 @@ static int __init dp_init(void)
pr_info("Open vSwitch switching datapath %s\n", VERSION);
- err = action_fifos_init();
+ err = compat_init();
if (err)
goto error;
+ err = action_fifos_init();
+ if (err)
+ goto error_compat_exit;
+
err = ovs_internal_dev_rtnl_link_register();
if (err)
goto error_action_fifos_exit;
@@ -2336,6 +2340,8 @@ error_unreg_rtnl_link:
ovs_internal_dev_rtnl_link_unregister();
error_action_fifos_exit:
action_fifos_exit();
+error_compat_exit:
+ compat_exit();
error:
return err;
}
@@ -2351,6 +2357,7 @@ static void dp_cleanup(void)
ovs_flow_exit();
ovs_internal_dev_rtnl_link_unregister();
action_fifos_exit();
+ compat_exit();
}
module_init(dp_init);
diff --git a/datapath/linux/Modules.mk b/datapath/linux/Modules.mk
--- a/datapath/linux/Modules.mk
+++ b/datapath/linux/Modules.mk
@@ -7,7 +7,9 @@ openvswitch_sources += \
linux/compat/gre.c \
linux/compat/gso.c \
linux/compat/genetlink-openvswitch.c \
+ linux/compat/inet_fragment.c \
linux/compat/ip_gre.c \
+ linux/compat/ip_fragment.c \
linux/compat/ip_tunnel.c \
linux/compat/ip_tunnels_core.c \
linux/compat/lisp.c \
@@ -77,6 +79,7 @@ openvswitch_headers += \
linux/compat/include/net/gre.h \
linux/compat/include/net/inet_ecn.h \
linux/compat/include/net/inet_frag.h \
+ linux/compat/include/net/inetpeer.h \
linux/compat/include/net/ip.h \
linux/compat/include/net/ip_tunnels.h \
linux/compat/include/net/ip6_route.h \
@@ -91,6 +94,7 @@ openvswitch_headers += \
linux/compat/include/net/udp_tunnel.h \
linux/compat/include/net/sock.h \
linux/compat/include/net/stt.h \
+ linux/compat/include/net/vrf.h \
linux/compat/include/net/vxlan.h \
linux/compat/include/net/netfilter/nf_conntrack_core.h \
linux/compat/include/net/netfilter/nf_conntrack_expect.h \
diff --git a/datapath/linux/compat/include/net/inet_frag.h b/datapath/linux/compat/include/net/inet_frag.h
--- a/datapath/linux/compat/include/net/inet_frag.h
+++ b/datapath/linux/compat/include/net/inet_frag.h
@@ -13,4 +13,62 @@
} while (0)
#endif
+#ifdef OVS_FRAGMENT_BACKPORT
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+#define q_flags(q) (q->last_in)
+#define qp_flags(qp) (qp->q.last_in)
+#else
+#define q_flags(q) (q->flags)
+#define qp_flags(qp) (qp->q.flags)
+#endif
+
+/**
+ * struct ovs_inet_frag_queue - fragment queue
+ *
+ * Wrap the system inet_frag_queue to provide a list evictor.
+ *
+ * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
+ */
+struct ovs_inet_frag_queue {
+ struct inet_frag_queue fq;
+ struct hlist_node list_evictor;
+};
+
+static inline bool rpl_inet_frag_evicting(struct inet_frag_queue *q)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+ return (q_flags(q) & INET_FRAG_FIRST_IN) && q->fragments != NULL;
+#else
+ struct ovs_inet_frag_queue *ofq = (struct ovs_inet_frag_queue *)q;
+ return !hlist_unhashed(&ofq->list_evictor);
+#endif
+}
+#define inet_frag_evicting rpl_inet_frag_evicting
+
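+/* Same batch size as upstream's fragment memory accounting. */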
+static unsigned int rpl_frag_percpu_counter_batch = 130000;
+#define frag_percpu_counter_batch rpl_frag_percpu_counter_batch
+
+static inline void rpl_sub_frag_mem_limit(struct netns_frags *nf, int i)
+{
+ __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
+}
+#define sub_frag_mem_limit rpl_sub_frag_mem_limit
+
+static inline void rpl_add_frag_mem_limit(struct netns_frags *nf, int i)
+{
+ __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
+}
+#define add_frag_mem_limit rpl_add_frag_mem_limit
+
+int rpl_inet_frags_init(struct inet_frags *f);
+#define inet_frags_init rpl_inet_frags_init
+
+void rpl_inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
+#define inet_frags_exit_net rpl_inet_frags_exit_net
+
+void rpl_inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
+#define inet_frag_destroy(q, f, work) rpl_inet_frag_destroy(q, f)
+#endif /* OVS_FRAGMENT_BACKPORT */
+
#endif /* inet_frag.h */
diff --git a/datapath/linux/compat/include/net/inetpeer.h b/datapath/linux/compat/include/net/inetpeer.h
new file mode 100644
--- /dev/null
+++ b/datapath/linux/compat/include/net/inetpeer.h
@@ -0,0 +1,18 @@
+#ifndef _NET_INETPEER_WRAPPER_H
+#define _NET_INETPEER_WRAPPER_H
+
+#include_next <net/inetpeer.h>
+
+#ifdef OVS_FRAGMENT_BACKPORT
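+/* Older inet_getpeer_v4() takes no VRF (vif) argument; accept and
+ * discard it so callers can use the newer calling convention. */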
+static inline struct inet_peer *rpl_inet_getpeer_v4(struct inet_peer_base *base,
+ __be32 v4daddr, int vif,
+ int create)
+{
+ return inet_getpeer_v4(base, v4daddr, create);
+}
+#define inet_getpeer_v4 rpl_inet_getpeer_v4
+#endif /* OVS_FRAGMENT_BACKPORT */
+
+#endif /* _NET_INETPEER_WRAPPER_H */
diff --git a/datapath/linux/compat/include/net/ip.h b/datapath/linux/compat/include/net/ip.h
--- a/datapath/linux/compat/include/net/ip.h
+++ b/datapath/linux/compat/include/net/ip.h
@@ -23,6 +23,10 @@ static inline void rpl_inet_get_local_port_range(struct net *net, int *low,
#endif
+#ifndef IPSKB_FRAG_PMTU
+#define IPSKB_FRAG_PMTU BIT(6)
+#endif
+
/* IPv4 datagram length is stored into 16bit field (tot_len) */
#ifndef IP_MAX_MTU
#define IP_MAX_MTU 0xFFFFU
@@ -106,5 +110,22 @@ static inline int rpl_ip_do_fragment(struct sock *sk, struct sk_buff *skb,
}
#define ip_do_fragment rpl_ip_do_fragment
#endif /* IP_DO_FRAGMENT */
+
+/* Prior to upstream commit d6b915e29f4a ("ip_fragment: don't forward
+ * defragmented DF packet"), IPCB(skb)->frag_max_size was not always populated
+ * correctly, which would lead to reassembled packets not being refragmented.
+ * So, we backport all of ip_defrag() in these cases.
+ */
+int rpl_ip_defrag(struct sk_buff *skb, u32 user);
+#define ip_defrag rpl_ip_defrag
+
+int __init rpl_ipfrag_init(void);
+void rpl_ipfrag_fini(void);
+#else /* OVS_FRAGMENT_BACKPORT */
+static inline int rpl_ipfrag_init(void) { return 0; }
+static inline void rpl_ipfrag_fini(void) { }
#endif /* OVS_FRAGMENT_BACKPORT */
+#define ipfrag_init rpl_ipfrag_init
+#define ipfrag_fini rpl_ipfrag_fini
+
#endif
diff --git a/datapath/linux/compat/include/net/vrf.h b/datapath/linux/compat/include/net/vrf.h
new file mode 100644
--- /dev/null
+++ b/datapath/linux/compat/include/net/vrf.h
@@ -0,0 +1,27 @@
+/*
+ * include/net/net_vrf.h - adds vrf dev structure definitions
+ * Copyright (c) 2015 Cumulus Networks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_NET_VRF_WRAPPER_H
+#define __LINUX_NET_VRF_WRAPPER_H
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)
+#include_next <net/vrf.h>
+#else
+
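+/* No VRF support before 4.3; report "no master device" as ifindex 0. */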
+static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
+{
+ return 0;
+}
+#endif
+
+#endif /* __LINUX_NET_VRF_WRAPPER_H */
diff --git a/datapath/linux/compat/inet_fragment.c b/datapath/linux/compat/inet_fragment.c
new file mode 100644
--- /dev/null
+++ b/datapath/linux/compat/inet_fragment.c
@@ -0,0 +1,557 @@
+/*
+ * inet fragments management
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Pavel Emelyanov <xemul@openvz.org>
+ * Started as consolidation of ipv4/ip_fragment.c,
+ * ipv6/reassembly.c and ipv6 nf conntrack reassembly
+ */
+
+#include <linux/version.h>
+
+#ifdef OVS_FRAGMENT_BACKPORT
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+
+#include <net/sock.h>
+#include <net/inet_frag.h>
+#include <net/inet_ecn.h>
+
+#define INETFRAGS_EVICT_BUCKETS 128
+#define INETFRAGS_EVICT_MAX 512
+
+/* don't rebuild inetfrag table with new secret more often than this */
+#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
+
+/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
+ * Value : 0xff if frame should be dropped.
+ * 0 or INET_ECN_CE value, to be ORed in to final iph->tos field
+ */
+const u8 ip_frag_ecn_table[16] = {
+ /* at least one fragment had CE, and others ECT_0 or ECT_1 */
+ [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = INET_ECN_CE,
+ [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
+ [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
+
+ /* invalid combinations : drop frame */
+ [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
+ [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
+ [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
+ [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
+ [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
+ [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
+ [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
+};
+
+static unsigned int
+inet_frag_hashfn(const struct inet_frags *f, struct inet_frag_queue *q)
+{
+ return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
+static bool inet_frag_may_rebuild(struct inet_frags *f)
+{
+ return time_after(jiffies,
+ f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
+}
+
+static void inet_frag_secret_rebuild(struct inet_frags *f)
+{
+ int i;
+
+ write_seqlock_bh(&f->rnd_seqlock);
+
+ if (!inet_frag_may_rebuild(f))
+ goto out;
+
+ get_random_bytes(&f->rnd, sizeof(u32));
+
+ for (i = 0; i < INETFRAGS_HASHSZ; i++) {
+ struct inet_frag_bucket *hb;
+ struct inet_frag_queue *q;
+ struct hlist_node *n;
+
+ hb = &f->hash[i];
+ spin_lock(&hb->chain_lock);
+
+ hlist_for_each_entry_safe(q, n, &hb->chain, list) {
+ unsigned int hval = inet_frag_hashfn(f, q);
+
+ if (hval != i) {
+ struct inet_frag_bucket *hb_dest;
+
+ hlist_del(&q->list);
+
+ /* Relink to new hash chain. */
+ hb_dest = &f->hash[hval];
+
+ /* This is the only place where we take
+ * another chain_lock while already holding
+ * one. As this will not run concurrently,
+ * we cannot deadlock on the hb_dest lock below; if it is
+ * already locked, it will be released soon, since the
+ * other caller cannot be waiting for the hb lock
+ * that we have taken above.
+ */
+ spin_lock_nested(&hb_dest->chain_lock,
+ SINGLE_DEPTH_NESTING);
+ hlist_add_head(&q->list, &hb_dest->chain);
+ spin_unlock(&hb_dest->chain_lock);
+ }
+ }
+ spin_unlock(&hb->chain_lock);
+ }
+
+ f->rebuild = false;
+ f->last_rebuild_jiffies = jiffies;
+out:
+ write_sequnlock_bh(&f->rnd_seqlock);
+}
+
+static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
+{
+ return q->net->low_thresh == 0 ||
+ frag_mem_limit(q->net) >= q->net->low_thresh;
+}
+
+static unsigned int
+inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
+ struct ovs_inet_frag_queue *ofq;
+#endif
+ struct inet_frag_queue *fq;
+ struct hlist_node *n;
+ unsigned int evicted = 0;
+ HLIST_HEAD(expired);
+
+ spin_lock(&hb->chain_lock);
+
+ hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
+ if (!inet_fragq_should_evict(fq))
+ continue;
+
+ if (!del_timer(&fq->timer))
+ continue;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
+ ofq = (struct ovs_inet_frag_queue *)fq;
+ hlist_add_head(&ofq->list_evictor, &expired);
+#else
+ hlist_add_head(&fq->list_evictor, &expired);
+#endif
+ ++evicted;
+ }
+
+ spin_unlock(&hb->chain_lock);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
+ hlist_for_each_entry_safe(ofq, n, &expired, list_evictor)
+ f->frag_expire((unsigned long) &ofq->fq);
+#else
+ hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
+ f->frag_expire((unsigned long) fq);
+#endif
+
+ return evicted;
+}
+
+static void inet_frag_worker(struct work_struct *work)
+{
+ unsigned int budget = INETFRAGS_EVICT_BUCKETS;
+ unsigned int i, evicted = 0;
+ struct inet_frags *f;
+
+ f = container_of(work, struct inet_frags, frags_work);
+
+ BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);
+
+ local_bh_disable();
+
+ for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
+ evicted += inet_evict_bucket(f, &f->hash[i]);
+ i = (i + 1) & (INETFRAGS_HASHSZ - 1);
+ if (evicted > INETFRAGS_EVICT_MAX)
+ break;
+ }
+
+ f->next_bucket = i;
+
+ local_bh_enable();
+
+ if (f->rebuild && inet_frag_may_rebuild(f))
+ inet_frag_secret_rebuild(f);
+}
+
+static void inet_frag_schedule_worker(struct inet_frags *f)
+{
+ if (unlikely(!work_pending(&f->frags_work)))
+ schedule_work(&f->frags_work);
+}
+#endif /* >= 3.17 */
+
+int inet_frags_init(struct inet_frags *f)
+{
+ int i;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
+ INIT_WORK(&f->frags_work, inet_frag_worker);
+#endif
+
+ for (i = 0; i < INETFRAGS_HASHSZ; i++) {
+ struct inet_frag_bucket *hb = &f->hash[i];
+
+ spin_lock_init(&hb->chain_lock);
+ INIT_HLIST_HEAD(&hb->chain);
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+ rwlock_init(&f->lock);
+ f->secret_timer.expires = jiffies + f->secret_interval;
+#else
+ seqlock_init(&f->rnd_seqlock);
+ f->last_rebuild_jiffies = 0;
+ f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
+ NULL);
+ if (!f->frags_cachep)
+ return -ENOMEM;
+#endif
+
+ return 0;
+}
+
+void inet_frags_fini(struct inet_frags *f)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
+ cancel_work_sync(&f->frags_work);
+ kmem_cache_destroy(f->frags_cachep);
+#endif
+}
+
+int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
+{
+ read_lock_bh(&f->lock);
+ inet_frag_evictor(nf, f, true);
+ read_unlock_bh(&f->lock);
+}
+#else
+void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
+{
+ unsigned int seq;
+
+evict_again:
+ local_bh_disable();
+ seq = read_seqbegin(&f->rnd_seqlock);
+
+ inet_frag_evictor(nf, f, true);
+
+ local_bh_enable();
+ cond_resched();
+
+ if (read_seqretry(&f->rnd_seqlock, seq) ||
+ percpu_counter_sum(&nf->mem))
+ goto evict_again;
+}
+#endif
+
+static struct inet_frag_bucket *
+get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+__acquires(f->lock)
+#else
+__acquires(hb->chain_lock)
+#endif
+{
+ struct inet_frag_bucket *hb;
+ unsigned int hash;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+ read_lock(&f->lock);
+#else
+ unsigned int seq;
+ restart:
+ seq = read_seqbegin(&f->rnd_seqlock);
+#endif
+
+ hash = inet_frag_hashfn(f, fq);
+ hb = &f->hash[hash];
+
+ spin_lock(&hb->chain_lock);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
+ if (read_seqretry(&f->rnd_seqlock, seq)) {
+ spin_unlock(&hb->chain_lock);
+ goto restart;
+ }
+#endif
+
+ return hb;
+}
+
+static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
+{
+ struct inet_frag_bucket *hb;
+
+ hb = get_frag_bucket_locked(fq, f);
+ hlist_del(&fq->list);
+ q_flags(fq) |= INET_FRAG_COMPLETE;
+ spin_unlock(&hb->chain_lock);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+ read_unlock(&f->lock);
+#endif
+}
+
+void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
+{
+ if (del_timer(&fq->timer))
+ atomic_dec(&fq->refcnt);
+
+ if (!(q_flags(fq) & INET_FRAG_COMPLETE)) {
+ fq_unlink(fq, f);
+ atomic_dec(&fq->refcnt);
+ }
+}
+
+static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
+ struct sk_buff *skb)
+{
+ if (f->skb_free)
+ f->skb_free(skb);
+ kfree_skb(skb);
+}
+
+void rpl_inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
+{
+ struct sk_buff *fp;
+ struct netns_frags *nf;
+ unsigned int sum, sum_truesize = 0;
+
+ WARN_ON(!(q_flags(q) & INET_FRAG_COMPLETE));
+ WARN_ON(del_timer(&q->timer) != 0);
+
+ /* Release all fragment data. */
+ fp = q->fragments;
+ nf = q->net;
+ while (fp) {
+ struct sk_buff *xp = fp->next;
+
+ sum_truesize += fp->truesize;
+ frag_kfree_skb(nf, f, fp);
+ fp = xp;
+ }
+ sum = sum_truesize + f->qsize;
+
+ if (f->destructor)
+ f->destructor(q);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+ kfree(q);
+#else
+ kmem_cache_free(f->frags_cachep, q);
+#endif
+
+ sub_frag_mem_limit(nf, sum);
+}
+
+int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+ struct inet_frag_queue *q;
+ int work, evicted = 0;
+
+ work = frag_mem_limit(nf) - nf->low_thresh;
+ while (work > 0 || force) {
+ spin_lock(&nf->lru_lock);
+
+ if (list_empty(&nf->lru_list)) {
+ spin_unlock(&nf->lru_lock);
+ break;
+ }
+
+ q = list_first_entry(&nf->lru_list,
+ struct inet_frag_queue, lru_list);
+ atomic_inc(&q->refcnt);
+ /* Remove q from list to avoid several CPUs grabbing it */
+ list_del_init(&q->lru_list);
+
+ spin_unlock(&nf->lru_lock);
+
+ spin_lock(&q->lock);
+ if (!(q->last_in & INET_FRAG_COMPLETE))
+ inet_frag_kill(q, f);
+ spin_unlock(&q->lock);
+
+ if (atomic_dec_and_test(&q->refcnt))
+ inet_frag_destroy(q, f, &work);
+ evicted++;
+ }
+
+ return evicted;
+#else
+ int i;
+
+ for (i = 0; i < INETFRAGS_HASHSZ ; i++)
+ inet_evict_bucket(f, &f->hash[i]);
+
+ return 0;
+#endif
+}
+
+static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
+ struct inet_frag_queue *qp_in,
+ struct inet_frags *f,
+ void *arg)
+{
+ struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
+ struct inet_frag_queue *qp;
+
+#ifdef CONFIG_SMP
+ /* With SMP race we have to recheck hash table, because
+ * such entry could have been created on other cpu before
+ * we acquired hash bucket lock.
+ */
+ hlist_for_each_entry(qp, &hb->chain, list) {
+ if (qp->net == nf && f->match(qp, arg)) {
+ atomic_inc(&qp->refcnt);
+ spin_unlock(&hb->chain_lock);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+ read_unlock(&f->lock);
+#endif
+ q_flags(qp_in) |= INET_FRAG_COMPLETE;
+ inet_frag_put(qp_in, f);
+ return qp;
+ }
+ }
+#endif /* CONFIG_SMP */
+ qp = qp_in;
+ if (!mod_timer(&qp->timer, jiffies + nf->timeout))
+ atomic_inc(&qp->refcnt);
+
+ atomic_inc(&qp->refcnt);
+ hlist_add_head(&qp->list, &hb->chain);
+
+ spin_unlock(&hb->chain_lock);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+ read_unlock(&f->lock);
+#endif
+
+ return qp;
+}
+
+static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
+ struct inet_frags *f,
+ void *arg)
+{
+ struct inet_frag_queue *q;
+
+ if (frag_mem_limit(nf) > nf->high_thresh) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
+ inet_frag_schedule_worker(f);
+#endif
+ return NULL;
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+ q = kzalloc(f->qsize, GFP_ATOMIC);
+#else
+ q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
+#endif
+ if (!q)
+ return NULL;
+
+ q->net = nf;
+ f->constructor(q, arg);
+ add_frag_mem_limit(nf, f->qsize);
+
+ setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
+ spin_lock_init(&q->lock);
+ atomic_set(&q->refcnt, 1);
+
+ return q;
+}
+
+static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
+ struct inet_frags *f,
+ void *arg)
+{
+ struct inet_frag_queue *q;
+
+ q = inet_frag_alloc(nf, f, arg);
+ if (!q)
+ return NULL;
+
+ return inet_frag_intern(nf, q, f, arg);
+}
+
+struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
+ struct inet_frags *f, void *key,
+ unsigned int hash)
+{
+ struct inet_frag_bucket *hb;
+ struct inet_frag_queue *q;
+ int depth = 0;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+ if (frag_mem_limit(nf) > nf->high_thresh)
+ inet_frag_evictor(nf, f, false);
+#else
+ if (frag_mem_limit(nf) > nf->low_thresh)
+ inet_frag_schedule_worker(f);
+#endif
+
+ hash &= (INETFRAGS_HASHSZ - 1);
+ hb = &f->hash[hash];
+
+ spin_lock(&hb->chain_lock);
+ hlist_for_each_entry(q, &hb->chain, list) {
+ if (q->net == nf && f->match(q, key)) {
+ atomic_inc(&q->refcnt);
+ spin_unlock(&hb->chain_lock);
+ return q;
+ }
+ depth++;
+ }
+ spin_unlock(&hb->chain_lock);
+
+ if (depth <= INETFRAGS_MAXDEPTH)
+ return inet_frag_create(nf, f, key);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
+ if (inet_frag_may_rebuild(f)) {
+ if (!f->rebuild)
+ f->rebuild = true;
+ inet_frag_schedule_worker(f);
+ }
+#endif
+
+ return ERR_PTR(-ENOBUFS);
+}
+
+void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
+ const char *prefix)
+{
+ static const char msg[] = "inet_frag_find: Fragment hash bucket"
+ " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
+ ". Dropping fragment.\n";
+
+ if (PTR_ERR(q) == -ENOBUFS)
+ net_dbg_ratelimited("%s%s", prefix, msg);
+}
+
+#endif /* OVS_FRAGMENT_BACKPORT */
diff --git a/datapath/linux/compat/ip_fragment.c b/datapath/linux/compat/ip_fragment.c
new file mode 100644
--- /dev/null
+++ b/datapath/linux/compat/ip_fragment.c
@@ -0,0 +1,739 @@
+/*
+ * IP fragmentation backport, heavily based on linux/net/ipv4/ip_fragment.c,
+ * copied from Linux 192132b9a034 ("net: Add support for VRFs to inetpeer cache").
+ *
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * The IP fragmentation functionality.
+ *
+ * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * Fixes:
+ * Alan Cox : Split from ip.c , see ip_input.c for history.
+ * David S. Miller : Begin massive cleanup...
+ * Andi Kleen : Add sysctls.
+ * xxxx : Overlapfrag bug.
+ * Ultima : ip_expire() kernel panic.
+ * Bill Hawes : Frag accounting and evictor fixes.
+ * John McDonald : 0 length frag bug.
+ * Alexey Kuznetsov: SMP races, threading, cleanup.
+ * Patrick McHardy : LRU queue of frag heads for evictor.
+ */
+
+#include <linux/version.h>
+
+#ifdef OVS_FRAGMENT_BACKPORT
+
+#define pr_fmt(fmt) "IPv4: " fmt
+
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/jiffies.h>
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/icmp.h>
+#include <linux/netdevice.h>
+#include <linux/jhash.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <net/route.h>
+#include <net/dst.h>
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/checksum.h>
+#include <net/inetpeer.h>
+#include <net/inet_frag.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/inet.h>
+#include <linux/netfilter_ipv4.h>
+#include <net/inet_ecn.h>
+#include <net/vrf.h>
+#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
+
+/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
+ * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
+ * as well. Or notify me, at least. --ANK
+ */
+
+static int sysctl_ipfrag_max_dist __read_mostly = 64;
+static const char ip_frag_cache_name[] = "ip4-frags";
+
+struct ipfrag_skb_cb
+{
+ struct inet_skb_parm h;
+ int offset;
+};
+
+#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
+
+/* Describe an entry in the "incomplete datagrams" queue. */
+struct ipq {
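+ /* Kernels before 4.2 lack list_evictor in struct inet_frag_queue,
+ * so overlay the OVS wrapper that provides one. */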
+ union {
+ struct inet_frag_queue q;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
+ struct ovs_inet_frag_queue oq;
+#endif
+ };
+
+ u32 user;
+ __be32 saddr;
+ __be32 daddr;
+ __be16 id;
+ u8 protocol;
+ u8 ecn; /* RFC3168 support */
+ u16 max_df_size; /* largest frag with DF set seen */
+ int iif;
+ int vif; /* VRF device index */
+ unsigned int rid;
+ struct inet_peer *peer;
+};
+
+static u8 ip4_frag_ecn(u8 tos)
+{
+ return 1 << (tos & INET_ECN_MASK);
+}
+
+static struct inet_frags ip4_frags;
+
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+ struct net_device *dev);
+
+struct ip4_create_arg {
+ struct iphdr *iph;
+ u32 user;
+ int vif;
+};
+
+static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
+{
+ net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
+ return jhash_3words((__force u32)id << 16 | prot,
+ (__force u32)saddr, (__force u32)daddr,
+ ip4_frags.rnd);
+}
+
+static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
+{
+ const struct ipq *ipq;
+
+ ipq = container_of(q, struct ipq, q);
+ return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
+}
+
+static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
+{
+ const struct ipq *qp;
+ const struct ip4_create_arg *arg = a;
+
+ qp = container_of(q, struct ipq, q);
+ return qp->id == arg->iph->id &&
+ qp->saddr == arg->iph->saddr &&
+ qp->daddr == arg->iph->daddr &&
+ qp->protocol == arg->iph->protocol &&
+ qp->user == arg->user &&
+ qp->vif == arg->vif;
+}
+
+static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
+{
+ struct ipq *qp = container_of(q, struct ipq, q);
+ struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
+ frags);
+ struct net *net = container_of(ipv4, struct net, ipv4);
+
+ const struct ip4_create_arg *arg = a;
+
+ qp->protocol = arg->iph->protocol;
+ qp->id = arg->iph->id;
+ qp->ecn = ip4_frag_ecn(arg->iph->tos);
+ qp->saddr = arg->iph->saddr;
+ qp->daddr = arg->iph->daddr;
+ qp->vif = arg->vif;
+ qp->user = arg->user;
+ qp->peer = sysctl_ipfrag_max_dist ?
+ inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) :
+ NULL;
+}
+
+static void ip4_frag_free(struct inet_frag_queue *q)
+{
+ struct ipq *qp;
+
+ qp = container_of(q, struct ipq, q);
+ if (qp->peer)
+ inet_putpeer(qp->peer);
+}
+
+
+/* Destruction primitives. */
+
+static void ipq_put(struct ipq *ipq)
+{
+ inet_frag_put(&ipq->q, &ip4_frags);
+}
+
+/* Kill ipq entry. It is not destroyed immediately,
+ * because the caller (and possibly others) still holds a reference.
+ */
+static void ipq_kill(struct ipq *ipq)
+{
+ inet_frag_kill(&ipq->q, &ip4_frags);
+}
+
+static bool frag_expire_skip_icmp(u32 user)
+{
+ return user == IP_DEFRAG_AF_PACKET ||
+ ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
+ __IP_DEFRAG_CONNTRACK_IN_END) ||
+ ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
+ __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
+}
+
+/*
+ * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
+ */
+static void ip_expire(unsigned long arg)
+{
+ struct ipq *qp;
+ struct net *net;
+
+ qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
+ net = container_of(qp->q.net, struct net, ipv4.frags);
+
+ spin_lock(&qp->q.lock);
+
+ if (qp_flags(qp) & INET_FRAG_COMPLETE)
+ goto out;
+
+ ipq_kill(qp);
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+
+ if (!inet_frag_evicting(&qp->q)) {
+ struct sk_buff *head = qp->q.fragments;
+ const struct iphdr *iph;
+ int err;
+
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
+
+ if (!(qp_flags(qp) & INET_FRAG_FIRST_IN) || !qp->q.fragments)
+ goto out;
+
+ rcu_read_lock();
+ head->dev = dev_get_by_index_rcu(net, qp->iif);
+ if (!head->dev)
+ goto out_rcu_unlock;
+
+ /* skb has no dst, perform route lookup again */
+ iph = ip_hdr(head);
+ err = ip_route_input_noref(head, iph->daddr, iph->saddr,
+ iph->tos, head->dev);
+ if (err)
+ goto out_rcu_unlock;
+
+ /* Only an end host needs to send an ICMP
+ * "Fragment Reassembly Timeout" message, per RFC792.
+ */
+ if (frag_expire_skip_icmp(qp->user) &&
+ (skb_rtable(head)->rt_type != RTN_LOCAL))
+ goto out_rcu_unlock;
+
+ /* Send an ICMP "Fragment Reassembly Timeout" message. */
+ icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
+out_rcu_unlock:
+ rcu_read_unlock();
+ }
+out:
+ spin_unlock(&qp->q.lock);
+ ipq_put(qp);
+}
+
+/* Find the correct entry in the "incomplete datagrams" queue for
+ * this IP datagram, creating a new one if nothing is found.
+ */
+static struct ipq *ip_find(struct net *net, struct iphdr *iph,
+ u32 user, int vif)
+{
+ struct inet_frag_queue *q;
+ struct ip4_create_arg arg;
+ unsigned int hash;
+
+ arg.iph = iph;
+ arg.user = user;
+ arg.vif = vif;
+
+ hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
+
+ q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
+ if (IS_ERR_OR_NULL(q)) {
+ inet_frag_maybe_warn_overflow(q, pr_fmt());
+ return NULL;
+ }
+ return container_of(q, struct ipq, q);
+}
+
+/* Is the fragment too far ahead to be part of ipq? */
+static int ip_frag_too_far(struct ipq *qp)
+{
+ struct inet_peer *peer = qp->peer;
+ unsigned int max = sysctl_ipfrag_max_dist;
+ unsigned int start, end;
+
+ int rc;
+
+ if (!peer || !max)
+ return 0;
+
+ start = qp->rid;
+ end = atomic_inc_return(&peer->rid);
+ qp->rid = end;
+
+ rc = qp->q.fragments && (end - start) > max;
+
+ if (rc) {
+ struct net *net;
+
+ net = container_of(qp->q.net, struct net, ipv4.frags);
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+ }
+
+ return rc;
+}
+
+static int ip_frag_reinit(struct ipq *qp)
+{
+ struct sk_buff *fp;
+ unsigned int sum_truesize = 0;
+
+ if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
+ atomic_inc(&qp->q.refcnt);
+ return -ETIMEDOUT;
+ }
+
+ fp = qp->q.fragments;
+ do {
+ struct sk_buff *xp = fp->next;
+
+ sum_truesize += fp->truesize;
+ kfree_skb(fp);
+ fp = xp;
+ } while (fp);
+ sub_frag_mem_limit(qp->q.net, sum_truesize);
+
+ qp_flags(qp) = 0;
+ qp->q.len = 0;
+ qp->q.meat = 0;
+ qp->q.fragments = NULL;
+ qp->q.fragments_tail = NULL;
+ qp->iif = 0;
+ qp->ecn = 0;
+
+ return 0;
+}
+
+/* Add new segment to existing queue. */
+static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+{
+ struct sk_buff *prev, *next;
+ struct net_device *dev;
+ unsigned int fragsize;
+ int flags, offset;
+ int ihl, end;
+ int err = -ENOENT;
+ u8 ecn;
+
+ if (qp_flags(qp) & INET_FRAG_COMPLETE)
+ goto err;
+
+ if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
+ unlikely(ip_frag_too_far(qp)) &&
+ unlikely(err = ip_frag_reinit(qp))) {
+ ipq_kill(qp);
+ goto err;
+ }
+
+ ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
+ offset = ntohs(ip_hdr(skb)->frag_off);
+ flags = offset & ~IP_OFFSET;
+ offset &= IP_OFFSET;
+ offset <<= 3; /* offset is in 8-byte chunks */
+ ihl = ip_hdrlen(skb);
+
+ /* Determine the position of this fragment. */
+ end = offset + skb->len - skb_network_offset(skb) - ihl;
+ err = -EINVAL;
+
+ /* Is this the final fragment? */
+ if ((flags & IP_MF) == 0) {
+ /* If we already have some bits beyond end
+ * or have different end, the segment is corrupted.
+ */
+ if (end < qp->q.len ||
+ ((qp_flags(qp) & INET_FRAG_LAST_IN) && end != qp->q.len))
+ goto err;
+ qp_flags(qp) |= INET_FRAG_LAST_IN;
+ qp->q.len = end;
+ } else {
+ if (end&7) {
+ end &= ~7;
+ if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+ if (end > qp->q.len) {
+ /* Some bits beyond end -> corruption. */
+ if (qp_flags(qp) & INET_FRAG_LAST_IN)
+ goto err;
+ qp->q.len = end;
+ }
+ }
+ if (end == offset)
+ goto err;
+
+ err = -ENOMEM;
+ if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
+ goto err;
+
+ err = pskb_trim_rcsum(skb, end - offset);
+ if (err)
+ goto err;
+
+ /* Find out which fragments are in front and at the back of us
+ * in the chain of fragments so far. We must know where to put
+ * this fragment, right?
+ */
+ prev = qp->q.fragments_tail;
+ if (!prev || FRAG_CB(prev)->offset < offset) {
+ next = NULL;
+ goto found;
+ }
+ prev = NULL;
+ for (next = qp->q.fragments; next != NULL; next = next->next) {
+ if (FRAG_CB(next)->offset >= offset)
+ break; /* bingo! */
+ prev = next;
+ }
+
+found:
+ /* We found where to put this one. Check for overlap with
+ * preceding fragment, and, if needed, align things so that
+ * any overlaps are eliminated.
+ */
+ if (prev) {
+ int i = (FRAG_CB(prev)->offset + prev->len) - offset;
+
+ if (i > 0) {
+ offset += i;
+ err = -EINVAL;
+ if (end <= offset)
+ goto err;
+ err = -ENOMEM;
+ if (!pskb_pull(skb, i))
+ goto err;
+ if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+ }
+
+ err = -ENOMEM;
+
+ while (next && FRAG_CB(next)->offset < end) {
+ int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
+
+ if (i < next->len) {
+ /* Eat head of the next overlapped fragment
+ * and leave the loop. The next ones cannot overlap.
+ */
+ if (!pskb_pull(next, i))
+ goto err;
+ FRAG_CB(next)->offset += i;
+ qp->q.meat -= i;
+ if (next->ip_summed != CHECKSUM_UNNECESSARY)
+ next->ip_summed = CHECKSUM_NONE;
+ break;
+ } else {
+ struct sk_buff *free_it = next;
+
+ /* Old fragment is completely overridden by the
+ * new one; drop it.
+ */
+ next = next->next;
+
+ if (prev)
+ prev->next = next;
+ else
+ qp->q.fragments = next;
+
+ qp->q.meat -= free_it->len;
+ sub_frag_mem_limit(qp->q.net, free_it->truesize);
+ kfree_skb(free_it);
+ }
+ }
+
+ FRAG_CB(skb)->offset = offset;
+
+ /* Insert this fragment in the chain of fragments. */
+ skb->next = next;
+ if (!next)
+ qp->q.fragments_tail = skb;
+ if (prev)
+ prev->next = skb;
+ else
+ qp->q.fragments = skb;
+
+ dev = skb->dev;
+ if (dev) {
+ qp->iif = dev->ifindex;
+ skb->dev = NULL;
+ }
+ qp->q.stamp = skb->tstamp;
+ qp->q.meat += skb->len;
+ qp->ecn |= ecn;
+ add_frag_mem_limit(qp->q.net, skb->truesize);
+ if (offset == 0)
+ qp_flags(qp) |= INET_FRAG_FIRST_IN;
+
+ fragsize = skb->len + ihl;
+
+ if (fragsize > qp->q.max_size)
+ qp->q.max_size = fragsize;
+
+ if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
+ fragsize > qp->max_df_size)
+ qp->max_df_size = fragsize;
+
+ if (qp_flags(qp) == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+ qp->q.meat == qp->q.len) {
+ unsigned long orefdst = skb->_skb_refdst;
+
+ skb->_skb_refdst = 0UL;
+ err = ip_frag_reasm(qp, prev, dev);
+ skb->_skb_refdst = orefdst;
+ return err;
+ }
+
+ skb_dst_drop(skb);
+ return -EINPROGRESS;
+
+err:
+ kfree_skb(skb);
+ return err;
+}
+
+
+/* Build a new IP datagram from all its fragments. */
+
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+ struct net_device *dev)
+{
+ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+ struct iphdr *iph;
+ struct sk_buff *fp, *head = qp->q.fragments;
+ int len;
+ int ihlen;
+ int err;
+ u8 ecn;
+
+ ipq_kill(qp);
+
+ ecn = ip_frag_ecn_table[qp->ecn];
+ if (unlikely(ecn == 0xff)) {
+ err = -EINVAL;
+ goto out_fail;
+ }
+ /* Make the one we just received the head. */
+ if (prev) {
+ head = prev->next;
+ fp = skb_clone(head, GFP_ATOMIC);
+ if (!fp)
+ goto out_nomem;
+
+ fp->next = head->next;
+ if (!fp->next)
+ qp->q.fragments_tail = fp;
+ prev->next = fp;
+
+ skb_morph(head, qp->q.fragments);
+ head->next = qp->q.fragments->next;
+
+ consume_skb(qp->q.fragments);
+ qp->q.fragments = head;
+ }
+
+ WARN_ON(!head);
+ WARN_ON(FRAG_CB(head)->offset != 0);
+
+ /* Allocate a new buffer for the datagram. */
+ ihlen = ip_hdrlen(head);
+ len = ihlen + qp->q.len;
+
+ err = -E2BIG;
+ if (len > 65535)
+ goto out_oversize;
+
+ /* Head of list must not be cloned. */
+ if (skb_unclone(head, GFP_ATOMIC))
+ goto out_nomem;
+
+ /* If the first fragment is fragmented itself, we split
+ * it into two chunks: the first holding the data and paged
+ * part, and the second holding only the fragment list. */
+ if (skb_has_frag_list(head)) {
+ struct sk_buff *clone;
+ int i, plen = 0;
+
+ clone = alloc_skb(0, GFP_ATOMIC);
+ if (!clone)
+ goto out_nomem;
+ clone->next = head->next;
+ head->next = clone;
+ skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+ skb_frag_list_init(head);
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+ clone->len = clone->data_len = head->data_len - plen;
+ head->data_len -= clone->len;
+ head->len -= clone->len;
+ clone->csum = 0;
+ clone->ip_summed = head->ip_summed;
+ add_frag_mem_limit(qp->q.net, clone->truesize);
+ }
+
+ skb_shinfo(head)->frag_list = head->next;
+ skb_push(head, head->data - skb_network_header(head));
+
+ for (fp=head->next; fp; fp = fp->next) {
+ head->data_len += fp->len;
+ head->len += fp->len;
+ if (head->ip_summed != fp->ip_summed)
+ head->ip_summed = CHECKSUM_NONE;
+ else if (head->ip_summed == CHECKSUM_COMPLETE)
+ head->csum = csum_add(head->csum, fp->csum);
+ head->truesize += fp->truesize;
+ }
+ sub_frag_mem_limit(qp->q.net, head->truesize);
+
+ head->next = NULL;
+ head->dev = dev;
+ head->tstamp = qp->q.stamp;
+ IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
+
+ iph = ip_hdr(head);
+ iph->tot_len = htons(len);
+ iph->tos |= ecn;
+
+ /* When we set IP_DF on a refragmented skb we must also force a
+ * call to ip_fragment to avoid forwarding a DF-skb of size s while
+ * original sender only sent fragments of size f (where f < s).
+ *
+ * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
+ * frag seen to avoid sending tiny DF-fragments in case skb was built
+ * from one very small df-fragment and one large non-df frag.
+ */
+ if (qp->max_df_size == qp->q.max_size) {
+ IPCB(head)->flags |= IPSKB_FRAG_PMTU;
+ iph->frag_off = htons(IP_DF);
+ } else {
+ iph->frag_off = 0;
+ }
+
+ ip_send_check(iph);
+
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
+ qp->q.fragments = NULL;
+ qp->q.fragments_tail = NULL;
+ return 0;
+
+out_nomem:
+ net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
+ err = -ENOMEM;
+ goto out_fail;
+out_oversize:
+ net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
+out_fail:
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+ return err;
+}
+
+/* Process an incoming IP datagram fragment. */
+int rpl_ip_defrag(struct sk_buff *skb, u32 user)
+{
+ struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
+ int vif = vrf_master_ifindex_rcu(dev);
+ struct net *net = dev_net(dev);
+ struct ipq *qp;
+
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
+
+ /* Lookup (or create) queue header */
+ qp = ip_find(net, ip_hdr(skb), user, vif);
+ if (qp) {
+ int ret;
+
+ spin_lock(&qp->q.lock);
+
+ ret = ip_frag_queue(qp, skb);
+
+ spin_unlock(&qp->q.lock);
+ ipq_put(qp);
+ return ret;
+ }
+
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+ kfree_skb(skb);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(rpl_ip_defrag);
+
+static int __net_init ipv4_frags_init_net(struct net *net)
+{
+ nf_defrag_ipv4_enable();
+
+ return 0;
+}
+
+static void __net_exit ipv4_frags_exit_net(struct net *net)
+{
+ inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
+}
+
+static struct pernet_operations ip4_frags_ops = {
+ .init = ipv4_frags_init_net,
+ .exit = ipv4_frags_exit_net,
+};
+
+int __init rpl_ipfrag_init(void)
+{
+ register_pernet_subsys(&ip4_frags_ops);
+ ip4_frags.hashfn = ip4_hashfn;
+ ip4_frags.constructor = ip4_frag_init;
+ ip4_frags.destructor = ip4_frag_free;
+ ip4_frags.skb_free = NULL;
+ ip4_frags.qsize = sizeof(struct ipq);
+ ip4_frags.match = ip4_frag_match;
+ ip4_frags.frag_expire = ip_expire;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)
+ ip4_frags.frags_cache_name = ip_frag_cache_name;
+#endif
+ if (inet_frags_init(&ip4_frags)) {
+ pr_warn("IP: failed to allocate ip4_frags cache\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void rpl_ipfrag_fini(void)
+{
+ inet_frags_fini(&ip4_frags);
+ unregister_pernet_subsys(&ip4_frags_ops);
+}
+
+#endif /* OVS_FRAGMENT_BACKPORT */
Backport IPv4 reassembly from the upstream commit caaecdd3d3f8 ("inet:
frags: remove INET_FRAG_EVICTED and use list_evictor for the test").
This is necessary because kernels prior to upstream commit d6b915e29f4a
("ip_fragment: don't forward defragmented DF packet") would not always
track the maximum received unit size during ip_defrag(). Without the
MRU, refragmentation cannot occur, so reassembled packets are dropped.

Signed-off-by: Joe Stringer <joestringer@nicira.com>
---
 acinclude.m4                                  |   8 +
 datapath/compat.h                             |  17 +
 datapath/datapath.c                           |   9 +-
 datapath/linux/Modules.mk                     |   4 +
 datapath/linux/compat/include/net/inet_frag.h |  58 ++
 datapath/linux/compat/include/net/inetpeer.h  |  18 +
 datapath/linux/compat/include/net/ip.h        |  21 +
 datapath/linux/compat/include/net/vrf.h       |  27 +
 datapath/linux/compat/inet_fragment.c         | 557 +++++++++++++++++++
 datapath/linux/compat/ip_fragment.c           | 739 ++++++++++++++++++++++++++
 10 files changed, 1457 insertions(+), 1 deletion(-)
 create mode 100644 datapath/linux/compat/include/net/inetpeer.h
 create mode 100644 datapath/linux/compat/include/net/vrf.h
 create mode 100644 datapath/linux/compat/inet_fragment.c
 create mode 100644 datapath/linux/compat/ip_fragment.c
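
Note for reviewers: the backported ip_defrag() keeps the upstream
calling convention. The caller hands over the skb; on a 0 return the
same skb holds the fully reassembled datagram, with
IPCB(skb)->frag_max_size recording the largest fragment seen (the MRU
needed for refragmentation). A minimal sketch of a call site follows;
the function name and the choice of the "user" value are illustrative
only, not part of this patch:

    #include <linux/skbuff.h>
    #include <net/ip.h>     /* compat wrapper: ip_defrag => rpl_ip_defrag */

    /* Sketch: feed one received fragment to the backported reassembly. */
    static int handle_fragment(struct sk_buff *skb, u32 user)
    {
            int err = ip_defrag(skb, user);

            if (err == -EINPROGRESS)
                    return 0;       /* fragment queued; skb consumed */
            if (err)
                    return err;     /* skb already freed on error */

            /* skb is now the complete datagram; a later output path can
             * use IPCB(skb)->frag_max_size to refragment to the original
             * on-wire sizes.
             */
            return 0;
    }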