Diffstat (limited to 'target/linux/generic/patches-3.3/046-fq_codel-qdisc-backlog.patch')
-rw-r--r--  target/linux/generic/patches-3.3/046-fq_codel-qdisc-backlog.patch  133
1 file changed, 133 insertions(+), 0 deletions(-)
diff --git a/target/linux/generic/patches-3.3/046-fq_codel-qdisc-backlog.patch b/target/linux/generic/patches-3.3/046-fq_codel-qdisc-backlog.patch
new file mode 100644
index 0000000000..5fc75a0742
--- /dev/null
+++ b/target/linux/generic/patches-3.3/046-fq_codel-qdisc-backlog.patch
@@ -0,0 +1,133 @@
+From: Eric Dumazet <edumazet@google.com>
+
+codel_should_drop() logic allows a packet not to be dropped if the queue
+size is under the max packet size.
+
+In fq_codel, we have two possible backlogs: the qdisc global one, and
+the flow local one.
+
+The meaningful one for codel_should_drop() should be the global backlog,
+not the per-flow one, so that thin flows can have a non-zero drop/mark
+probability.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Dave Taht <dave.taht@bufferbloat.net>
+Cc: Kathleen Nichols <nichols@pollere.com>
+Cc: Van Jacobson <van@pollere.net>
+---
+ include/net/codel.h | 15 +++++++--------
+ net/sched/sch_codel.c | 4 ++--
+ net/sched/sch_fq_codel.c | 5 +++--
+ 3 files changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/include/net/codel.h b/include/net/codel.h
+index 7546517..550debf 100644
+--- a/include/net/codel.h
++++ b/include/net/codel.h
+@@ -205,7 +205,7 @@ static codel_time_t codel_control_law(codel_time_t t,
+
+
+ static bool codel_should_drop(const struct sk_buff *skb,
+- unsigned int *backlog,
++ struct Qdisc *sch,
+ struct codel_vars *vars,
+ struct codel_params *params,
+ struct codel_stats *stats,
+@@ -219,13 +219,13 @@ static bool codel_should_drop(const struct sk_buff *skb,
+ }
+
+ vars->ldelay = now - codel_get_enqueue_time(skb);
+- *backlog -= qdisc_pkt_len(skb);
++ sch->qstats.backlog -= qdisc_pkt_len(skb);
+
+ if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
+ stats->maxpacket = qdisc_pkt_len(skb);
+
+ if (codel_time_before(vars->ldelay, params->target) ||
+- *backlog <= stats->maxpacket) {
++ sch->qstats.backlog <= stats->maxpacket) {
+ /* went below - stay below for at least interval */
+ vars->first_above_time = 0;
+ return false;
+@@ -249,8 +249,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+ struct codel_params *params,
+ struct codel_vars *vars,
+ struct codel_stats *stats,
+- codel_skb_dequeue_t dequeue_func,
+- u32 *backlog)
++ codel_skb_dequeue_t dequeue_func)
+ {
+ struct sk_buff *skb = dequeue_func(vars, sch);
+ codel_time_t now;
+@@ -261,7 +260,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+ return skb;
+ }
+ now = codel_get_time();
+- drop = codel_should_drop(skb, backlog, vars, params, stats, now);
++ drop = codel_should_drop(skb, sch, vars, params, stats, now);
+ if (vars->dropping) {
+ if (!drop) {
+ /* sojourn time below target - leave dropping state */
+@@ -292,7 +291,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+ qdisc_drop(skb, sch);
+ stats->drop_count++;
+ skb = dequeue_func(vars, sch);
+- if (!codel_should_drop(skb, backlog,
++ if (!codel_should_drop(skb, sch,
+ vars, params, stats, now)) {
+ /* leave dropping state */
+ vars->dropping = false;
+@@ -313,7 +312,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+ stats->drop_count++;
+
+ skb = dequeue_func(vars, sch);
+- drop = codel_should_drop(skb, backlog, vars, params,
++ drop = codel_should_drop(skb, sch, vars, params,
+ stats, now);
+ }
+ vars->dropping = true;
+diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
+index 213ef60..2f9ab17 100644
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -77,8 +77,8 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+- skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
+- dequeue, &sch->qstats.backlog);
++ skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
++
+ /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+ * or HTB crashes. Defer it for next round.
+ */
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index 337ff20..9fc1c62 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -217,13 +217,14 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ */
+ static struct sk_buff *dequeue(struct codel_vars *vars, struct Qdisc *sch)
+ {
++ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct fq_codel_flow *flow;
+ struct sk_buff *skb = NULL;
+
+ flow = container_of(vars, struct fq_codel_flow, cvars);
+ if (flow->head) {
+ skb = dequeue_head(flow);
+- sch->qstats.backlog -= qdisc_pkt_len(skb);
++ q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
+ sch->q.qlen--;
+ }
+ return skb;
+@@ -256,7 +257,7 @@ begin:
+ prev_ecn_mark = q->cstats.ecn_mark;
+
+ skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
+- dequeue, &q->backlogs[flow - q->flows]);
++ dequeue);
+
+ flow->dropped += q->cstats.drop_count - prev_drop_count;
+ flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
+
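
The core of the change is the threshold check in codel_should_drop(): CoDel declines to drop or mark while the backlog it is handed stays at or below the largest packet seen so far. The following sketch is a hypothetical, self-contained illustration, not kernel code; the helper name below_drop_threshold and the byte counts are made up. It shows why feeding that check the per-flow backlog exempts a thin flow forever, while the qdisc-wide backlog restores a non-zero drop/mark probability.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the "backlog <= maxpacket" escape hatch in
 * codel_should_drop(): when it returns true, CoDel declines to drop/mark. */
static bool below_drop_threshold(unsigned int backlog, unsigned int maxpacket)
{
    return backlog <= maxpacket;
}

int main(void)
{
    unsigned int maxpacket     = 1514;  /* largest packet seen so far */
    unsigned int flow_backlog  = 100;   /* thin flow: one small packet queued */
    unsigned int qdisc_backlog = 60000; /* the qdisc as a whole is congested */

    /* Pre-patch behaviour: a thin flow's local backlog never exceeds
     * maxpacket, so the flow is permanently exempt from drop/mark. */
    printf("per-flow check exempts flow: %s\n",
           below_drop_threshold(flow_backlog, maxpacket) ? "yes" : "no");

    /* Patched behaviour: the qdisc-wide backlog reflects real congestion,
     * so the thin flow gets a non-zero drop/mark probability. */
    printf("global check exempts flow:   %s\n",
           below_drop_threshold(qdisc_backlog, maxpacket) ? "yes" : "no");

    return 0;
}

Built with any C compiler, the first check prints "yes" (the thin flow stays exempt under the old per-flow comparison) and the second prints "no" (the global comparison lets CoDel act on it).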