@@ -242,6 +242,15 @@ struct ubuf_info {
* the end of the header data, ie. at skb->end.
*/
struct skb_shared_info {
+ /* Intermediate layers must ensure that destructor_arg
+ * remains valid until skb destructor */
+ void *destructor_arg;
+
+ /*
+ * Warning: all fields from here until dataref are cleared in
+ * __alloc_skb()
+ *
+ */
unsigned char nr_frags;
__u8 tx_flags;
unsigned short gso_size;
@@ -253,14 +262,10 @@ struct skb_shared_info {
__be32 ip6_frag_id;
/*
- * Warning : all fields before dataref are cleared in __alloc_skb()
+ * Warning: all fields before dataref are cleared in __alloc_skb()
*/
atomic_t dataref;
- /* Intermediate layers must ensure that destructor_arg
- * remains valid until skb destructor */
- void * destructor_arg;
-
/* must be last field, see pskb_expand_head() */
skb_frag_t frags[MAX_SKB_FRAGS];
};
@@ -219,7 +219,10 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
/* make sure we initialize shinfo sequentially */
shinfo = skb_shinfo(skb);
- memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+
+ memset(&shinfo->nr_frags, 0,
+ offsetof(struct skb_shared_info, dataref)
+ - offsetof(struct skb_shared_info, nr_frags));
atomic_set(&shinfo->dataref, 1);
kmemcheck_annotate_variable(shinfo->destructor_arg);
As of the previous patch we align the end (rather than the start) of the
struct to a cache line and so, with 32 and 64 byte cache lines and the
shinfo size increase from the next patch, the first 8 bytes of the struct
end up on a different cache line to the rest of it. So make sure it is
something relatively unimportant to avoid hitting an extra cache line on
hot operations such as kfree_skb.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
---
 include/linux/skbuff.h | 15 ++++++++++-----
 net/core/skbuff.c      |  5 ++++-
 2 files changed, 14 insertions(+), 6 deletions(-)