@@ -52,7 +52,7 @@ struct Qdisc
u32 parent;
atomic_t refcnt;
unsigned long state;
- struct sk_buff *gso_skb;
+ struct sk_buff_head requeue;
struct sk_buff_head q;
struct netdev_queue *dev_queue;
struct Qdisc *next_sched;
@@ -45,7 +45,7 @@ static inline int qdisc_qlen(struct Qdisc *q)
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
if (unlikely(skb->next))
- q->gso_skb = skb;
+ __skb_queue_head(&q->requeue, skb);
else
q->ops->requeue(skb, q);
@@ -57,9 +57,8 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
struct sk_buff *skb;
- if ((skb = q->gso_skb))
- q->gso_skb = NULL;
- else
+ skb = __skb_dequeue(&q->requeue);
+ if (!skb)
skb = q->dequeue(q);
return skb;
@@ -328,6 +327,7 @@ struct Qdisc noop_qdisc = {
.flags = TCQ_F_BUILTIN,
.ops = &noop_qdisc_ops,
.list = LIST_HEAD_INIT(noop_qdisc.list),
+ .requeue.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
.q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
.dev_queue = &noop_netdev_queue,
};
@@ -353,6 +353,7 @@ static struct Qdisc noqueue_qdisc = {
.flags = TCQ_F_BUILTIN,
.ops = &noqueue_qdisc_ops,
.list = LIST_HEAD_INIT(noqueue_qdisc.list),
+ .requeue.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
.q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
.dev_queue = &noqueue_netdev_queue,
};
@@ -473,6 +474,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
sch->padded = (char *) sch - (char *) p;
INIT_LIST_HEAD(&sch->list);
+ skb_queue_head_init(&sch->requeue);
skb_queue_head_init(&sch->q);
sch->ops = ops;
sch->enqueue = ops->enqueue;
@@ -543,7 +545,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
module_put(ops->owner);
dev_put(qdisc_dev(qdisc));
- kfree_skb(qdisc->gso_skb);
+ __skb_queue_purge(&qdisc->requeue);
kfree((char *) qdisc - qdisc->padded);
}
-------------> patch 2/3 (labeled [PATCH 2/9] in the original series Subject below)
Subject: [PATCH 2/9]: pkt_sched: Always use q->requeue in dev_requeue_skb().
Date: Mon, 18 Aug 2008 01:36:50 -0700 (PDT)
From: David Miller <davem@davemloft.net>
To: netdev@vger.kernel.org
CC: jarkao2@gmail.com
Newsgroups: gmane.linux.network
pkt_sched: Always use q->requeue in dev_requeue_skb().
There is no reason to call into the complicated qdiscs
just to remember the last SKB where we found the device
blocked.
The SKB is outside of the qdiscs realm at this point.
Signed-off-by: David S. Miller <davem@davemloft.net>
---
net/sched/sch_generic.c | 5 +----
1 files changed, 1 insertions(+), 4 deletions(-)
@@ -44,10 +44,7 @@ static inline int qdisc_qlen(struct Qdisc *q)
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
- if (unlikely(skb->next))
- __skb_queue_head(&q->requeue, skb);
- else
- q->ops->requeue(skb, q);
+ __skb_queue_head(&q->requeue, skb);
__netif_schedule(q);
return 0;
----------------> patch 3/3 (take 2)
pkt_sched: Check the state of tx_queue in dequeue_skb()
Check in dequeue_skb() the state of tx_queue for requeued skb to save
on locking and re-requeuing, and possibly remove the current check in
qdisc_run(). Based on the idea of Alexander Duyck.
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
---
@@ -52,11 +52,24 @@ static inline int dev_requeue_skb(struct
static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = skb_peek(&q->requeue);
- skb = __skb_dequeue(&q->requeue);
- if (!skb)
+ if (unlikely(skb)) {
+ struct net_device *dev = qdisc_dev(q);
+ struct netdev_queue *txq;
+
+ /* check the reason of requeuing without tx lock first */
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+ if (!netif_tx_queue_stopped(txq) &&
+ !netif_tx_queue_frozen(txq)) {
+ __skb_unlink(skb, &q->requeue);
+ } else {
+ skb = NULL;
+ __netif_schedule(q);
+ }
+ } else {
skb = q->dequeue(q);
+ }
return skb;
}