@@ -384,21 +384,10 @@ static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
prev->next = skb;
}
}

-static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static int __fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct fq_flow *f)
{
struct fq_sched_data *q = qdisc_priv(sch);
- struct fq_flow *f;
-
- if (unlikely(sch->q.qlen >= sch->limit))
- return qdisc_drop(skb, sch);
-
- f = fq_classify(skb, q);
- if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
- q->stat_flows_plimit++;
- return qdisc_drop(skb, sch);
- }
-
f->qlen++;
if (skb_is_retransmit(skb))
q->stat_tcp_retrans++;
@@ -421,6 +410,23 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_SUCCESS;
}
+static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
+ struct fq_flow *f;
+
+ if (unlikely(sch->q.qlen >= sch->limit))
+ return qdisc_drop(skb, sch);
+
+ f = fq_classify(skb, q);
+ if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
+ q->stat_flows_plimit++;
+ return qdisc_drop(skb, sch);
+ }
+
+ return __fq_enqueue(skb, sch, f);
+}
+
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
struct rb_node *p;
Move the original enqueue code to the __fq_enqueue() helper, so fq_enqueue() keeps only the queue-limit and per-flow-limit drop checks before delegating.

Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 net/sched/sch_fq.c | 31 ++++++++++++++++++-------------
 1 file changed, 18 insertions(+), 13 deletions(-)