[net-next,1/2] net: sched: Do not drop root lock in tcf_qevent_handle()

Message ID: e309e07f9626d3c61b6da446a1273f9e7b7a10db.1594732978.git.petrm@mellanox.com
State: Changes Requested
Delegated to: David Miller
Series: net: sched: Do not drop root lock in tcf_qevent_handle()

Commit Message

Petr Machata July 14, 2020, 1:32 p.m. UTC
Mirred currently does not mix well with blocks executed after the qdisc
root lock is taken. This includes classification blocks (such as in PRIO,
ETS, DRR qdiscs) and qevents. The locking triggered by the packet that
mirred mirrors can lead to deadlocks: either when the thread of execution
attempts to take the lock a second time, or when two threads end up waiting
on each other's locks.
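
As an illustration of the first shape only, the following minimal user-space
sketch uses an error-checking pthread mutex as a stand-in for the qdisc root
lock; the names (red_enqueue, qevent_mirred, root_lock) are invented for the
example and are not kernel code.

/*
 * Sketch: the same thread of execution tries to take a non-recursive
 * lock a second time.  An error-checking mutex reports the self-deadlock
 * instead of hanging.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t root_lock;

/* Stand-in for a qevent block whose mirred action redirects the packet
 * back into the same qdisc, re-entering the enqueue path. */
static void qevent_mirred(void)
{
	int err = pthread_mutex_lock(&root_lock);

	if (err)
		printf("second lock attempt failed: %s\n", strerror(err));
	else
		pthread_mutex_unlock(&root_lock);
}

static void red_enqueue(void)
{
	pthread_mutex_lock(&root_lock);	/* core takes the root lock... */
	qevent_mirred();		/* ...and the qevent runs under it */
	pthread_mutex_unlock(&root_lock);
}

int main(void)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&root_lock, &attr);

	red_enqueue();	/* reports EDEADLK instead of hanging forever */
	return 0;
}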

The qevent patchset attempted not to introduce further badness of this
sort, and dropped the lock before executing the qevent block. However, this
led to too little locking and opened races between qdisc configuration and
packet enqueue in the RED qdisc.
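
As an illustration of that kind of race, the following user-space sketch
shows the window that dropping the lock opens between an "enqueue" thread
and a "configuration" thread; the qdisc_limit variable and the function
names are invented for the example and do not correspond to the actual RED
fields.

/*
 * Sketch: a decision made under the lock can be stale by the time the
 * lock is re-taken, because the configuration path ran in the window
 * where the lock was dropped.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;
static int qdisc_limit = 1000;	/* stand-in for a qdisc parameter */

static void *enqueue_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&root_lock);
	int limit = qdisc_limit;	/* decision made under the lock... */

	pthread_mutex_unlock(&root_lock);	/* lock dropped for the qevent block */
	usleep(1000);				/* qevent/mirred work happens here */
	pthread_mutex_lock(&root_lock);

	/* ...but the configuration path may have changed the qdisc meanwhile,
	 * so "limit" no longer matches qdisc_limit. */
	printf("enqueue decided with limit=%d, qdisc now has limit=%d\n",
	       limit, qdisc_limit);
	pthread_mutex_unlock(&root_lock);
	return NULL;
}

static void *config_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&root_lock);
	qdisc_limit = 1;	/* e.g. a "tc qdisc change" request */
	pthread_mutex_unlock(&root_lock);
	return NULL;
}

int main(void)
{
	pthread_t enq, cfg;

	pthread_create(&enq, NULL, enqueue_path, NULL);
	pthread_create(&cfg, NULL, config_path, NULL);
	pthread_join(enq, NULL);
	pthread_join(cfg, NULL);
	return 0;
}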

Until the deadlock issues are solved in a way that can be applied across
many qdiscs reasonably easily, do for qevents what is already done for the
classification blocks and simply keep holding the root lock.

Signed-off-by: Petr Machata <petrm@mellanox.com>
---
 include/net/pkt_cls.h | 4 ++--
 net/sched/cls_api.c   | 8 +-------
 net/sched/sch_red.c   | 6 +++---
 3 files changed, 6 insertions(+), 12 deletions(-)

Patch

diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 690a7f49c8f9..d4d461236351 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -568,7 +568,7 @@  void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
 			       struct netlink_ext_ack *extack);
 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
-				  spinlock_t *root_lock, struct sk_buff **to_free, int *ret);
+				  struct sk_buff **to_free, int *ret);
 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
 #else
 static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
@@ -591,7 +591,7 @@  static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlatt
 
 static inline struct sk_buff *
 tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
-		  spinlock_t *root_lock, struct sk_buff **to_free, int *ret)
+		  struct sk_buff **to_free, int *ret)
 {
 	return skb;
 }
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 322b279154de..b2b7440c2ae7 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -3822,7 +3822,7 @@  int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index
 EXPORT_SYMBOL(tcf_qevent_validate_change);
 
 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
-				  spinlock_t *root_lock, struct sk_buff **to_free, int *ret)
+				  struct sk_buff **to_free, int *ret)
 {
 	struct tcf_result cl_res;
 	struct tcf_proto *fl;
@@ -3832,9 +3832,6 @@  struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, stru
 
 	fl = rcu_dereference_bh(qe->filter_chain);
 
-	if (root_lock)
-		spin_unlock(root_lock);
-
 	switch (tcf_classify(skb, fl, &cl_res, false)) {
 	case TC_ACT_SHOT:
 		qdisc_qstats_drop(sch);
@@ -3853,9 +3850,6 @@  struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, stru
 		return NULL;
 	}
 
-	if (root_lock)
-		spin_lock(root_lock);
-
 	return skb;
 }
 EXPORT_SYMBOL(tcf_qevent_handle);
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index de2be4d04ed6..a79602f7fab8 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,7 +94,7 @@  static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 
 		if (INET_ECN_set_ce(skb)) {
 			q->stats.prob_mark++;
-			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret);
+			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
 			if (!skb)
 				return NET_XMIT_CN | ret;
 		} else if (!red_use_nodrop(q)) {
@@ -114,7 +114,7 @@  static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 
 		if (INET_ECN_set_ce(skb)) {
 			q->stats.forced_mark++;
-			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret);
+			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
 			if (!skb)
 				return NET_XMIT_CN | ret;
 		} else if (!red_use_nodrop(q)) {
@@ -137,7 +137,7 @@  static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 	return ret;
 
 congestion_drop:
-	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, root_lock, to_free, &ret);
+	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 	if (!skb)
 		return NET_XMIT_CN | ret;