@@ -426,6 +426,11 @@ extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
const char *name);
extern void xt_table_unlock(struct xt_table *t);
+extern void xt_tlock_lockall(void);
+extern void xt_tlock_unlockall(void);
+extern void xt_tlock_lock(void);
+extern void xt_tlock_unlock(void);
+
extern int xt_proto_init(struct net *net, u_int8_t af);
extern void xt_proto_fini(struct net *net, u_int8_t af);
@@ -231,8 +231,6 @@ static inline struct arpt_entry *get_entry(void *base, unsigned int offset)
return (struct arpt_entry *)(base + offset);
}
-static DEFINE_PER_CPU(spinlock_t, arp_tables_lock);
-
unsigned int arpt_do_table(struct sk_buff *skb,
unsigned int hook,
const struct net_device *in,
@@ -256,7 +254,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
outdev = out ? out->name : nulldevname;
local_bh_disable();
- spin_lock(&__get_cpu_var(arp_tables_lock));
+ xt_tlock_lock();
private = table->private;
table_base = private->entries[smp_processor_id()];
@@ -331,7 +329,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
e = (void *)e + e->next_offset;
}
} while (!hotdrop);
- spin_unlock(&__get_cpu_var(arp_tables_lock));
+ xt_tlock_unlock();
local_bh_enable();
if (hotdrop)
@@ -709,33 +707,31 @@ static void get_counters(const struct xt_table_info *t,
{
unsigned int cpu;
unsigned int i = 0;
- unsigned int curcpu = raw_smp_processor_id();
+ unsigned int curcpu;
+ xt_tlock_lockall();
/* Instead of clearing (by a previous call to memset())
* the counters and using adds, we set the counters
* with data used by 'current' CPU
- * We dont care about preemption here.
*/
- spin_lock_bh(&per_cpu(arp_tables_lock, curcpu));
+ curcpu = smp_processor_id();
ARPT_ENTRY_ITERATE(t->entries[curcpu],
t->size,
set_entry_to_counter,
counters,
&i);
- spin_unlock_bh(&per_cpu(arp_tables_lock, curcpu));
for_each_possible_cpu(cpu) {
if (cpu == curcpu)
continue;
i = 0;
- spin_lock_bh(&per_cpu(arp_tables_lock, cpu));
ARPT_ENTRY_ITERATE(t->entries[cpu],
t->size,
add_entry_to_counter,
counters,
&i);
- spin_unlock_bh(&per_cpu(arp_tables_lock, cpu));
}
+ xt_tlock_unlockall();
}
static struct xt_counters *alloc_counters(struct xt_table *table)
@@ -1181,14 +1177,14 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
/* Choose the copy that is on our node */
local_bh_disable();
curcpu = smp_processor_id();
- spin_lock(&__get_cpu_var(arp_tables_lock));
+ xt_tlock_lock();
loc_cpu_entry = private->entries[curcpu];
ARPT_ENTRY_ITERATE(loc_cpu_entry,
private->size,
add_counter_to_entry,
paddc,
&i);
- spin_unlock(&__get_cpu_var(arp_tables_lock));
+ xt_tlock_unlock();
local_bh_enable();
unlock_up_free:
@@ -297,7 +297,6 @@ static void trace_packet(struct sk_buff *skb,
}
#endif
-static DEFINE_PER_CPU(spinlock_t, ip_tables_lock);
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
@@ -342,7 +341,7 @@ ipt_do_table(struct sk_buff *skb,
IP_NF_ASSERT(table->valid_hooks & (1 << hook));
local_bh_disable();
- spin_lock(&__get_cpu_var(ip_tables_lock));
+ xt_tlock_lock();
private = table->private;
table_base = private->entries[smp_processor_id()];
@@ -439,7 +438,7 @@ ipt_do_table(struct sk_buff *skb,
e = (void *)e + e->next_offset;
}
} while (!hotdrop);
- spin_unlock(&__get_cpu_var(ip_tables_lock));
+ xt_tlock_unlock();
local_bh_enable();
#ifdef DEBUG_ALLOW_ALL
@@ -895,34 +894,32 @@ get_counters(const struct xt_table_info *t,
{
unsigned int cpu;
unsigned int i = 0;
- unsigned int curcpu = raw_smp_processor_id();
+ unsigned int curcpu;
+ xt_tlock_lockall();
/* Instead of clearing (by a previous call to memset())
* the counters and using adds, we set the counters
* with data used by 'current' CPU
- * We dont care about preemption here.
*/
- spin_lock_bh(&per_cpu(ip_tables_lock, curcpu));
+ curcpu = smp_processor_id();
IPT_ENTRY_ITERATE(t->entries[curcpu],
t->size,
set_entry_to_counter,
counters,
&i);
- spin_unlock_bh(&per_cpu(ip_tables_lock, curcpu));
for_each_possible_cpu(cpu) {
if (cpu == curcpu)
continue;
i = 0;
- spin_lock_bh(&per_cpu(ip_tables_lock, cpu));
IPT_ENTRY_ITERATE(t->entries[cpu],
t->size,
add_entry_to_counter,
counters,
&i);
- spin_unlock_bh(&per_cpu(ip_tables_lock, cpu));
}
+ xt_tlock_unlockall();
}
static struct xt_counters * alloc_counters(struct xt_table *table)
@@ -1393,14 +1390,14 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
local_bh_disable();
/* Choose the copy that is on our node */
curcpu = smp_processor_id();
- spin_lock(&__get_cpu_var(ip_tables_lock));
+ xt_tlock_lock();
loc_cpu_entry = private->entries[curcpu];
IPT_ENTRY_ITERATE(loc_cpu_entry,
private->size,
add_counter_to_entry,
paddc,
&i);
- spin_unlock(&__get_cpu_var(ip_tables_lock));
+ xt_tlock_unlock();
local_bh_enable();
unlock_up_free:
@@ -2220,10 +2217,7 @@ static struct pernet_operations ip_tables_net_ops = {
static int __init ip_tables_init(void)
{
- int cpu, ret;
-
- for_each_possible_cpu(cpu)
- spin_lock_init(&per_cpu(ip_tables_lock, cpu));
+ int ret;
ret = register_pernet_subsys(&ip_tables_net_ops);
if (ret < 0)
@@ -329,7 +329,6 @@ static void trace_packet(struct sk_buff *skb,
}
#endif
-static DEFINE_PER_CPU(spinlock_t, ip6_tables_lock);
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
@@ -368,7 +367,7 @@ ip6t_do_table(struct sk_buff *skb,
IP_NF_ASSERT(table->valid_hooks & (1 << hook));
local_bh_disable();
- spin_lock(&__get_cpu_var(ip6_tables_lock));
+ xt_tlock_lock();
private = table->private;
table_base = private->entries[smp_processor_id()];
@@ -469,7 +468,7 @@ ip6t_do_table(struct sk_buff *skb,
#ifdef CONFIG_NETFILTER_DEBUG
((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
#endif
- spin_unlock(&__get_cpu_var(ip6_tables_lock));
+ xt_tlock_unlock();
local_bh_enable();
#ifdef DEBUG_ALLOW_ALL
@@ -925,33 +924,31 @@ get_counters(const struct xt_table_info *t,
{
unsigned int cpu;
unsigned int i = 0;
- unsigned int curcpu = raw_smp_processor_id();
+ unsigned int curcpu;
+ xt_tlock_lockall();
/* Instead of clearing (by a previous call to memset())
* the counters and using adds, we set the counters
* with data used by 'current' CPU
- * We dont care about preemption here.
*/
- spin_lock_bh(&per_cpu(ip6_tables_lock, curcpu));
+ curcpu = smp_processor_id();
IP6T_ENTRY_ITERATE(t->entries[curcpu],
t->size,
set_entry_to_counter,
counters,
&i);
- spin_unlock_bh(&per_cpu(ip6_tables_lock, curcpu));
for_each_possible_cpu(cpu) {
if (cpu == curcpu)
continue;
i = 0;
- spin_lock_bh(&per_cpu(ip6_tables_lock, cpu));
IP6T_ENTRY_ITERATE(t->entries[cpu],
t->size,
add_entry_to_counter,
counters,
&i);
- spin_unlock_bh(&per_cpu(ip6_tables_lock, cpu));
}
+ xt_tlock_unlockall();
}
static struct xt_counters *alloc_counters(struct xt_table *table)
@@ -1423,14 +1420,14 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
local_bh_disable();
/* Choose the copy that is on our node */
curcpu = smp_processor_id();
- spin_lock(&__get_cpu_var(ip6_tables_lock));
+ xt_tlock_lock();
loc_cpu_entry = private->entries[curcpu];
IP6T_ENTRY_ITERATE(loc_cpu_entry,
private->size,
add_counter_to_entry,
paddc,
&i);
- spin_unlock(&__get_cpu_var(ip6_tables_lock));
+ xt_tlock_unlock();
local_bh_enable();
unlock_up_free:
@@ -2248,10 +2245,7 @@ static struct pernet_operations ip6_tables_net_ops = {
static int __init ip6_tables_init(void)
{
- int cpu, ret;
-
- for_each_possible_cpu(cpu)
- spin_lock_init(&per_cpu(ip6_tables_lock, cpu));
+ int ret;
ret = register_pernet_subsys(&ip6_tables_net_ops);
if (ret < 0)
@@ -680,9 +680,13 @@ xt_replace_table(struct xt_table *table,
return NULL;
}
oldinfo = private;
+ /*
+ * Ensure all newinfo fields are visible to other cpus before the new
+ * table->private pointer is published; readers take no lock against us.
+ */
+ smp_wmb();
table->private = newinfo;
newinfo->initial_entries = oldinfo->initial_entries;
-
return oldinfo;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
@@ -1126,9 +1130,58 @@ static struct pernet_operations xt_net_ops = {
.init = xt_net_init,
};
+static DEFINE_PER_CPU(spinlock_t, xt_tables_lock);
+
+void xt_tlock_lockall(void)
+{
+ int cpu;
+
+ local_bh_disable();
+ preempt_disable();
+ for_each_possible_cpu(cpu) {
+ spin_lock(&per_cpu(xt_tables_lock, cpu));
+ /*
+ * drop spin_lock()'s preempt count so it cannot overflow
+ */
+ preempt_enable_no_resched();
+ }
+}
+EXPORT_SYMBOL(xt_tlock_lockall);
+
+void xt_tlock_unlockall(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ preempt_disable();
+ spin_unlock(&per_cpu(xt_tables_lock, cpu));
+ }
+ preempt_enable();
+ local_bh_enable();
+}
+EXPORT_SYMBOL(xt_tlock_unlockall);
+
+/*
+ * Caller must run with preemption disabled: we lock this cpu's lock.
+ */
+void xt_tlock_lock(void)
+{
+ spin_lock(&__get_cpu_var(xt_tables_lock));
+}
+EXPORT_SYMBOL(xt_tlock_lock);
+
+void xt_tlock_unlock(void)
+{
+ spin_unlock(&__get_cpu_var(xt_tables_lock));
+}
+EXPORT_SYMBOL(xt_tlock_unlock);
+
static int __init xt_init(void)
{
- int i, rv;
+ int i, rv, cpu;
+
+ for_each_possible_cpu(cpu)
+ spin_lock_init(&per_cpu(xt_tables_lock, cpu));
xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
if (!xt)