author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-11 11:38:13 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-11 11:38:13 -0800
commit     cb18eccff48ef3986d1072964590bce6fec705fb (patch)
tree       777fb1d15e0281341e1e02c9803d989538d346f2 /net/sched/sch_cbq.c
parent     c827ba4cb49a30ce581201fd0ba2be77cde412c7 (diff)
parent     5ef213f6842277ee1df5659f59fac0ffc9beb411 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (45 commits)
  [IPV4]: Restore multipath routing after rt_next changes.
  [XFRM] IPV6: Fix outbound RO transformation which is broken by IPsec tunnel patch.
  [NET]: Reorder fields of struct dst_entry
  [DECNET]: Convert decnet route to use the new dst_entry 'next' pointer
  [IPV6]: Convert ipv6 route to use the new dst_entry 'next' pointer
  [IPV4]: Convert ipv4 route to use the new dst_entry 'next' pointer
  [NET]: Introduce union in struct dst_entry to hold 'next' pointer
  [DECNET]: fix misannotation of linkinfo_dn
  [DECNET]: FRA_{DST,SRC} are le16 for decnet
  [UDP]: UDP can use sk_hash to speedup lookups
  [NET]: Fix whitespace errors.
  [NET] XFRM: Fix whitespace errors.
  [NET] X25: Fix whitespace errors.
  [NET] WANROUTER: Fix whitespace errors.
  [NET] UNIX: Fix whitespace errors.
  [NET] TIPC: Fix whitespace errors.
  [NET] SUNRPC: Fix whitespace errors.
  [NET] SCTP: Fix whitespace errors.
  [NET] SCHED: Fix whitespace errors.
  [NET] RXRPC: Fix whitespace errors.
  ...
Diffstat (limited to 'net/sched/sch_cbq.c')
-rw-r--r--  net/sched/sch_cbq.c | 32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index f79a4f3d0a9..48830cac101 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -40,12 +40,12 @@
=======================================
Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
- Management Models for Packet Networks",
+ Management Models for Packet Networks",
IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995
- [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995
+ [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995
- [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
+ [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
Parameters", 1996
[4] Sally Floyd and Michael Speer, "Experimental Results
@@ -59,12 +59,12 @@
the implementation is different. Particularly:
--- The WRR algorithm is different. Our version looks more
- reasonable (I hope) and works when quanta are allowed to be
- less than MTU, which is always the case when real time classes
- have small rates. Note, that the statement of [3] is
- incomplete, delay may actually be estimated even if class
- per-round allotment is less than MTU. Namely, if per-round
- allotment is W*r_i, and r_1+...+r_k = r < 1
+ reasonable (I hope) and works when quanta are allowed to be
+ less than MTU, which is always the case when real time classes
+ have small rates. Note, that the statement of [3] is
+ incomplete, delay may actually be estimated even if class
+ per-round allotment is less than MTU. Namely, if per-round
+ allotment is W*r_i, and r_1+...+r_k = r < 1
delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B
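
The delay bound quoted in this hunk is easy to sanity-check numerically. Below is a standalone C sketch, not part of the kernel source; all parameter values and names are illustrative, and the bracket notation [x] is read as the ceiling:

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative parameters, assuming [x] above means ceil(x). */
	double B   = 125000000.0;  /* link bandwidth, bytes/sec (1 Gbit/s) */
	double MTU = 1500.0;       /* bytes */
	double W   = 500.0;        /* WRR allotment scale, bytes */
	double r_i = 0.1;          /* rate share of class i */
	double r   = 0.5;          /* r_1 + ... + r_k, must stay below 1 */
	int    k   = 5;            /* number of classes in the sum */

	/* Per-round allotment W*r_i = 50 bytes, well below the MTU,
	   yet the bound below is still finite, as the comment argues. */
	double rounds  = ceil(MTU / (W * r_i));
	double delay_i = (rounds * W * r + W * r + k * MTU) / B;

	printf("delay_i bound: %.6f sec\n", delay_i);  /* ~0.000122 sec */
	return 0;
}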
@@ -280,7 +280,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
- case TC_ACT_STOLEN:
+ case TC_ACT_STOLEN:
*qerr = NET_XMIT_SUCCESS;
case TC_ACT_SHOT:
return NULL;
@@ -479,7 +479,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
if (!cl->delayed) {
delay += cl->offtime;
- /*
+ /*
Class goes to sleep, so that it will have no
chance to work avgidle. Let's forgive it 8)
@@ -717,7 +717,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
}
#endif
-/*
+/*
It is mission critical procedure.
We "regenerate" toplevel cutoff, if transmitting class
@@ -739,7 +739,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
}
} while ((borrowed=borrowed->borrow) != NULL);
}
-#if 0
+#if 0
/* It is not necessary now. Uncommenting it
will save CPU cycles, but decrease fairness.
*/
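
For readers unfamiliar with CBQ's toplevel machinery, here is a hypothetical toy model of the borrow-chain walk visible in this hunk. Every name below is an invented stand-in, not the real struct cbq_class layout:

#define TOY_MAXLEVEL 8  /* stand-in for TC_CBQ_MAXLEVEL */

struct toy_class {
	int over_limit;            /* is this ancestor still throttled? */
	struct toy_class *borrow;  /* class we borrow bandwidth from */
};

/* Climb the borrow chain; if any ancestor is no longer over its
   limit, lift the toplevel cutoff back to "no cutoff at all". */
static void toy_update_toplevel(int *toplevel, struct toy_class *borrowed)
{
	do {
		if (!borrowed->over_limit) {
			*toplevel = TOY_MAXLEVEL;
			return;
		}
	} while ((borrowed = borrowed->borrow) != NULL);
}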
@@ -768,7 +768,7 @@ cbq_update(struct cbq_sched_data *q)
(now - last) is total time between packet right edges.
(last_pktlen/rate) is "virtual" busy time, so that
- idle = (now - last) - last_pktlen/rate
+ idle = (now - last) - last_pktlen/rate
*/
idle = PSCHED_TDIFF(q->now, cl->last);
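
The arithmetic described in this comment is compact enough to restate in plain C. The sketch below is illustrative only; the real code uses the kernel's PSCHED_TDIFF fixed-point clock arithmetic rather than doubles, and the names are hypothetical:

/* idle = (now - last) - last_pktlen/rate, with "now" and "last"
   being packet right edges and last_pktlen/rate the virtual busy
   time of the previous packet. */
static double toy_idle(double now, double last,
		       double last_pktlen, double rate)
{
	double busy = last_pktlen / rate;  /* virtual transmit time, sec */
	return (now - last) - busy;        /* negative: class ran ahead */
}

/* E.g. a 1500-byte packet on a 1 Mbyte/s class with right edges
   2 ms apart gives idle = 0.002 - 0.0015 = 0.0005 sec. */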
@@ -907,7 +907,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
skb = cl->q->dequeue(cl->q);
/* Class did not give us any skb :-(
- It could occur even if cl->q->q.qlen != 0
+ It could occur even if cl->q->q.qlen != 0
f.e. if cl->q == "tbf"
*/
if (skb == NULL)
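
The situation this comment warns about, a non-empty inner qdisc whose dequeue still returns NULL, is easy to model. A hedged toy sketch follows; the types are invented, and real TBF is token-bucket based and considerably more involved:

struct toy_queue {
	int qlen;    /* packets held, the analogue of cl->q->q.qlen */
	int tokens;  /* crude stand-in for a TBF-style rate budget */
};

/* May return NULL even when qlen != 0, exactly the case the
   caller above must handle. */
static const char *toy_dequeue(struct toy_queue *q)
{
	if (q->qlen == 0)
		return NULL;   /* genuinely empty */
	if (q->tokens <= 0)
		return NULL;   /* non-empty but rate-limited right now */
	q->qlen--;
	q->tokens--;
	return "skb";          /* stand-in for a real sk_buff */
}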
@@ -2131,7 +2131,7 @@ static int __init cbq_module_init(void)
{
return register_qdisc(&cbq_qdisc_ops);
}
-static void __exit cbq_module_exit(void)
+static void __exit cbq_module_exit(void)
{
unregister_qdisc(&cbq_qdisc_ops);
}