@@ -804,6 +804,16 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
}
}
+int __rps_check_max_queues(unsigned int idx);
+
+static inline int rps_check_max_queues(unsigned int idx)
+{
+ if (idx < rps_max_num_queues)
+ return 0;
+
+ return __rps_check_max_queues(idx);
+}
+
#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
u16 filter_id);
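
The inline wrapper above keeps the hot path to a single comparison; only an
index at or above the current rps_max_num_queues takes the out-of-line slow
path. A minimal caller sketch (illustration only, not part of this patch: the
helper name is hypothetical, and the rps_record_sock_flow() call assumes the
signature in the hunk header continues with a u32 hash argument):

	/* Hypothetical: record the RX queue of a flow, growing the tracked
	 * queue space the first time a larger index is seen. Indices that
	 * can never fit (beyond RPS_MAX_QID) are simply not recorded.
	 */
	static void example_record_rx_queue(struct rps_sock_flow_table *table,
					    u32 hash, unsigned int rxq)
	{
		if (rps_check_max_queues(rxq))
			return;

		rps_record_sock_flow(table, hash);	/* assumed signature */
	}
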
@@ -127,6 +127,54 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
return ret;
}
+
+int __rps_check_max_queues(unsigned int idx)
+{
+ unsigned int old;
+ size_t size;
+ int ret = 0;
+
+ /* Assume the maximum number of queues should be at least the
+ * number of CPUs. This avoids too much thrashing of the sock
+ * flow table at initialization.
+ */
+ if (idx < nr_cpu_ids && nr_cpu_ids < RPS_MAX_QID)
+ idx = nr_cpu_ids;
+
+ if (idx > RPS_MAX_QID)
+ return -EINVAL;
+
+ mutex_lock(&sock_flow_mutex);
+
+ old = rps_max_num_queues;
+ rps_max_num_queues = idx;
+
+ /* If the rounded-up queue count is unchanged, the old queue mask
+ * still applies, so the table does not need to be recreated.
+ */
+
+ if (roundup_pow_of_two(old) != roundup_pow_of_two(idx)) {
+ struct rps_sock_flow_table *sock_table;
+
+ sock_table = rcu_dereference_protected(rps_sock_flow_table,
+ lockdep_is_held(&sock_flow_mutex));
+ size = sock_table ? sock_table->mask + 1 : 0;
+
+ /* Force creation of a new rps_sock_flow_table. It's
+ * the same size as the existing table, but we expunge
+ * any stale queue entries that would refer to the old
+ * queue mask.
+ */
+ ret = rps_create_sock_flow_table(size, size,
+ sock_table, true);
+ if (ret)
+ rps_max_num_queues = old;
+ }
+
+ mutex_unlock(&sock_flow_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(__rps_check_max_queues);
+
#endif /* CONFIG_RPS */
#ifdef CONFIG_NET_FLOW_LIMIT
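
Why the rebuild is gated on roundup_pow_of_two(): as the comment in the slow
path describes, stale entries are only a problem when they "refer to the old
queue mask", i.e. the mask derived from the rounded-up maximum. Two maxima in
the same power-of-two bucket share that mask, so the existing entries remain
valid. A standalone sketch of that invariant (the entry-encoding detail is an
assumption for illustration, not code from this series):

	#include <linux/log2.h>

	/* Assuming entries pack the queue index behind a mask derived from
	 * roundup_pow_of_two(max): two maxima that round up to the same
	 * power of two share that mask, so existing entries stay valid and
	 * no rebuild is needed. (roundup_pow_of_two(0) is undefined, hence
	 * the non-zero guard.)
	 */
	static bool example_queue_mask_unchanged(unsigned int old_max,
						 unsigned int new_max)
	{
		return old_max && new_max &&
		       roundup_pow_of_two(old_max) ==
		       roundup_pow_of_two(new_max);
	}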