--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -228,6 +228,7 @@ enum {
         SKB_FCLONE_UNAVAILABLE,
         SKB_FCLONE_ORIG,
         SKB_FCLONE_CLONE,
+        SKB_MUST_COPY,
 };

 enum {
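The new value lands in the last free slot of a two-bit field: skb->fclone is declared as a 2-bit bitfield in struct sk_buff, so the three existing states plus SKB_MUST_COPY (value 3) exactly fill it; a fifth state would require widening the field. An abridged excerpt of the declaration from the same header, quoted for context:

        __u8                    pkt_type:3,
                                fclone:2,
                                ipvs_property:1,
                                peeked:1,
                                nf_trace:1;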
@@ -449,13 +450,19 @@ extern struct sk_buff *__alloc_skb(unsigned int size,
 static inline struct sk_buff *alloc_skb(unsigned int size,
                                         gfp_t priority)
 {
-        return __alloc_skb(size, priority, 0, -1);
+        return __alloc_skb(size, priority, SKB_FCLONE_UNAVAILABLE, -1);
 }

 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
                                                gfp_t priority)
 {
-        return __alloc_skb(size, priority, 1, -1);
+        return __alloc_skb(size, priority, SKB_FCLONE_ORIG, -1);
+}
+
+static inline struct sk_buff *alloc_skb_mustcopy(unsigned int size,
+                                                 gfp_t priority)
+{
+        return __alloc_skb(size, priority, SKB_MUST_COPY, -1);
 }

 extern int skb_recycle_check(struct sk_buff *skb, int skb_size);
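For callers, the new helper reads like its siblings. A minimal usage sketch (a hypothetical caller; "len", "headroom" and "payload" are placeholders, not part of the patch): allocate a buffer whose payload must stay private, fill it, and hand it to the stack, knowing that any later skb_clone() on it degrades to a deep copy.

        struct sk_buff *skb;

        skb = alloc_skb_mustcopy(len + headroom, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;
        skb_reserve(skb, headroom);
        memcpy(skb_put(skb, len), payload, len);        /* private data */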
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -177,7 +177,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
         struct sk_buff *skb;
         u8 *data;

-        cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
+        cache = skbuff_head_cache;
+        if (fclone == SKB_FCLONE_ORIG)
+                cache = skbuff_fclone_cache;

         /* Get the HEAD */
         skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
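The rewritten cache selection makes the policy explicit: only SKB_FCLONE_ORIG allocations need the oversized fclone cache, which reserves room for a companion clone and a shared refcount behind the parent, while a SKB_MUST_COPY skb is an ordinary head-cache object that merely clones differently. For reference, the fclone cache is created in skb_init() with exactly that extra room (upstream code, quoted for context):

        skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
                                                (2 * sizeof(struct sk_buff)) +
                                                sizeof(atomic_t),
                                                0,
                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                                NULL);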
@@ -220,13 +222,13 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
         skb_frag_list_init(skb);
         memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));

-        if (fclone) {
+        skb->fclone = fclone;
+        if (fclone == SKB_FCLONE_ORIG) {
                 struct sk_buff *child = skb + 1;
                 atomic_t *fclone_ref = (atomic_t *) (child + 1);

                 kmemcheck_annotate_bitfield(child, flags1);
                 kmemcheck_annotate_bitfield(child, flags2);
-                skb->fclone = SKB_FCLONE_ORIG;
                 atomic_set(fclone_ref, 1);

                 child->fclone = SKB_FCLONE_UNAVAILABLE;
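Stamping skb->fclone before the branch means SKB_MUST_COPY and SKB_FCLONE_UNAVAILABLE skbs get marked too, not just fclone parents. The pointer arithmetic inside the branch relies on the fclone-cache object layout; a comment-only sketch mirroring the code above:

        /*
         * One skbuff_fclone_cache object, as the arithmetic assumes:
         *
         *   skb                                   parent, SKB_FCLONE_ORIG
         *   child      = skb + 1                  companion, starts as
         *                                         SKB_FCLONE_UNAVAILABLE,
         *                                         handed out as the clone
         *   fclone_ref = (atomic_t *)(child + 1)  shared reference count
         */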
@@ -259,7 +261,8 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
         int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
         struct sk_buff *skb;

-        skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
+        skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
+                          SKB_FCLONE_UNAVAILABLE, node);
         if (likely(skb)) {
                 skb_reserve(skb, NET_SKB_PAD);
                 skb->dev = dev;
@@ -364,6 +367,7 @@ static void kfree_skbmem(struct sk_buff *skb)
         switch (skb->fclone) {
         case SKB_FCLONE_UNAVAILABLE:
+        case SKB_MUST_COPY:
                 kmem_cache_free(skbuff_head_cache, skb);
                 break;

         case SKB_FCLONE_ORIG:
@@ -493,7 +497,9 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
         struct skb_shared_info *shinfo;

-        if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+        if (skb_is_nonlinear(skb) ||
+            skb->fclone == SKB_FCLONE_ORIG ||
+            skb->fclone == SKB_FCLONE_CLONE)
                 return 0;

         skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
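Note what the relaxed condition lets through: a SKB_MUST_COPY skb may now be recycled. Since upstream skb_recycle_check() zeroes the head on success (memset up to the tail offset), a recycled skb comes back as plain SKB_FCLONE_UNAVAILABLE, so the must-copy property does not survive recycling. A hypothetical driver-side sketch ("priv" fields are placeholders, not from the patch):

        /* Reuse the skb for RX if the stack is done with it; a must-copy
         * skb that passes the check returns to the pool as an ordinary one. */
        if (skb_recycle_check(skb, priv->rx_buf_size))
                priv->rx_skb = skb;             /* requeue for RX refill */
        else
                dev_kfree_skb_any(skb);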
@@ -640,6 +646,8 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
                 atomic_t *fclone_ref = (atomic_t *) (n + 1);
                 n->fclone = SKB_FCLONE_CLONE;
                 atomic_inc(fclone_ref);
+        } else if (skb->fclone == SKB_MUST_COPY) {
+                return skb_copy(skb, gfp_mask);
         } else {
                 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
                 if (!n)
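A minimal sketch of the resulting skb_clone() behavior (hypothetical caller, not part of the patch): for a must-copy buffer the returned skb owns private data, so writes to it never reach the original.

        struct sk_buff *orig = alloc_skb_mustcopy(128, GFP_KERNEL);
        struct sk_buff *dup;

        if (!orig)
                return -ENOMEM;
        memset(skb_put(orig, 64), 0xab, 64);

        dup = skb_clone(orig, GFP_KERNEL);      /* really skb_copy() here */
        if (dup)
                dup->data[0] = 0xcd;            /* orig->data[0] stays 0xab */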