author | YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> | 2007-02-09 23:25:16 +0900
committer | David S. Miller <davem@sunset.davemloft.net> | 2007-02-10 23:20:08 -0800
commit | 10297b99315e5e08fe623ba56da35db1fee69ba9 (patch)
tree | 06cfd5434ad5d4cb9dd8e0715716da0abd52849c /net
parent | 7612713fb69a17b79ca7d757df4446700f4afe6c (diff)
[NET] SCHED: Fix whitespace errors.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
33 files changed, 207 insertions, 207 deletions
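The changes below are purely mechanical: trailing whitespace is stripped and continuation lines are re-indented to match the surrounding kernel style; no logic changes. As an illustrative aside (not part of the original commit), this class of error can be caught or repaired with stock git tooling; both options shown are standard git, and fix.patch is a hypothetical file name used only for the example.

    git diff --check                      # warn about trailing whitespace and space-before-tab
    git apply --whitespace=fix fix.patch  # apply a patch, repairing such whitespace errors as it goes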
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 835070e9169..dd0868dfbd9 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -65,7 +65,7 @@ int tcf_hash_release(struct tcf_common *p, int bind, p->tcfc_bindcnt--; p->tcfc_refcnt--; - if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) { + if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) { tcf_hash_destroy(p, hinfo); ret = 1; } @@ -362,7 +362,7 @@ static struct tc_action_ops *tc_lookup_action_id(u32 type) #endif int tcf_action_exec(struct sk_buff *skb, struct tc_action *act, - struct tcf_result *res) + struct tcf_result *res) { struct tc_action *a; int ret = -1; @@ -473,7 +473,7 @@ errout: } struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est, - char *name, int ovr, int bind, int *err) + char *name, int ovr, int bind, int *err) { struct tc_action *a; struct tc_action_ops *a_o; @@ -553,7 +553,7 @@ err_out: } struct tc_action *tcf_action_init(struct rtattr *rta, struct rtattr *est, - char *name, int ovr, int bind, int *err) + char *name, int ovr, int bind, int *err) { struct rtattr *tb[TCA_ACT_MAX_PRIO+1]; struct tc_action *head = NULL, *act, *act_prev = NULL; @@ -590,7 +590,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a, int err = 0; struct gnet_dump d; struct tcf_act_hdr *h = a->priv; - + if (h == NULL) goto errout; @@ -632,7 +632,7 @@ errout: static int tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq, - u16 flags, int event, int bind, int ref) + u16 flags, int event, int bind, int ref) { struct tcamsg *t; struct nlmsghdr *nlh; @@ -645,7 +645,7 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq, t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; - + x = (struct rtattr*) skb->tail; RTA_PUT(skb, TCA_ACT_TAB, 0, NULL); @@ -653,7 +653,7 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq, goto rtattr_failure; x->rta_len = skb->tail - (u8*)x; - + nlh->nlmsg_len = skb->tail - b; return skb->len; @@ -852,7 +852,7 @@ tca_action_gd(struct rtattr *rta, struct nlmsghdr *n, u32 pid, int event) } if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event, - 0, 1) <= 0) { + 0, 1) <= 0) { kfree_skb(skb); ret = -EINVAL; goto err; @@ -861,7 +861,7 @@ tca_action_gd(struct rtattr *rta, struct nlmsghdr *n, u32 pid, int event) /* now do the delete */ tcf_action_destroy(head, 0); ret = rtnetlink_send(skb, pid, RTNLGRP_TC, - n->nlmsg_flags&NLM_F_ECHO); + n->nlmsg_flags&NLM_F_ECHO); if (ret > 0) return 0; return ret; @@ -872,7 +872,7 @@ err: } static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event, - u16 flags) + u16 flags) { struct tcamsg *t; struct nlmsghdr *nlh; @@ -900,10 +900,10 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event, goto rtattr_failure; x->rta_len = skb->tail - (u8*)x; - + nlh->nlmsg_len = skb->tail - b; NETLINK_CB(skb).dst_group = RTNLGRP_TC; - + err = rtnetlink_send(skb, pid, RTNLGRP_TC, flags&NLM_F_ECHO); if (err > 0) err = 0; @@ -915,7 +915,7 @@ nlmsg_failure: return -1; } - + static int tcf_action_add(struct rtattr *rta, struct nlmsghdr *n, u32 pid, int ovr) { @@ -999,13 +999,13 @@ find_dump_kind(struct nlmsghdr *n) return NULL; if (rtattr_parse(tb, TCA_ACT_MAX_PRIO, RTA_DATA(tb1), - NLMSG_ALIGN(RTA_PAYLOAD(tb1))) < 0) + NLMSG_ALIGN(RTA_PAYLOAD(tb1))) < 0) return NULL; if (tb[0] == NULL) return NULL; if (rtattr_parse(tb2, TCA_ACT_MAX, RTA_DATA(tb[0]), - RTA_PAYLOAD(tb[0])) < 0) + RTA_PAYLOAD(tb[0])) < 0) return NULL; kind = tb2[TCA_ACT_KIND-1]; @@ 
-1043,7 +1043,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) } nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, - cb->nlh->nlmsg_type, sizeof(*t)); + cb->nlh->nlmsg_type, sizeof(*t)); t = NLMSG_DATA(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 85de7efd5fe..60095d86fd8 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -65,7 +65,7 @@ static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ }; #endif /* CONFIG_GACT_PROB */ static int tcf_gact_init(struct rtattr *rta, struct rtattr *est, - struct tc_action *a, int ovr, int bind) + struct tc_action *a, int ovr, int bind) { struct rtattr *tb[TCA_GACT_MAX]; struct tc_gact *parm; diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 4c68c718f5e..0fdabfa9f4b 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -70,7 +70,7 @@ static int ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int } if (t->u.kernel.target->checkentry && !t->u.kernel.target->checkentry(table, NULL, - t->u.kernel.target, t->data, + t->u.kernel.target, t->data, hook)) { module_put(t->u.kernel.target->me); ret = -EINVAL; @@ -83,7 +83,7 @@ static void ipt_destroy_target(struct ipt_entry_target *t) { if (t->u.kernel.target->destroy) t->u.kernel.target->destroy(t->u.kernel.target, t->data); - module_put(t->u.kernel.target->me); + module_put(t->u.kernel.target->me); } static int tcf_ipt_release(struct tcf_ipt *ipt, int bind) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 8ac65c219b9..53aa96cd579 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -153,8 +153,8 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, if (tkey->offmask) { if (skb->len > tkey->at) { char *j = pptr + tkey->at; - offset += ((*j & tkey->offmask) >> - tkey->shift); + offset += ((*j & tkey->offmask) >> + tkey->shift); } else { goto bad; } @@ -176,7 +176,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, *ptr = ((*ptr & tkey->mask) ^ tkey->val); munged++; } - + if (munged) skb->tc_verd = SET_TC_MUNGED(skb->tc_verd); goto done; @@ -200,8 +200,8 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, struct tcf_pedit *p = a->priv; struct tc_pedit *opt; struct tcf_t t; - int s; - + int s; + s = sizeof(*opt) + p->tcfp_nkeys * sizeof(struct tc_pedit_key); /* netlink spinlocks held above us - must use ATOMIC */ diff --git a/net/sched/act_police.c b/net/sched/act_police.c index af68e1e8325..6ffe35da22b 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -62,7 +62,7 @@ struct tc_police_compat #ifdef CONFIG_NET_CLS_ACT static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb, - int type, struct tc_action *a) + int type, struct tc_action *a) { struct tcf_common *p; int err = 0, index = -1, i = 0, s_i = 0, n_i = 0; @@ -112,7 +112,7 @@ void tcf_police_destroy(struct tcf_police *p) { unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK); struct tcf_common **p1p; - + for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) { if (*p1p == &p->common) { write_lock_bh(&police_lock); @@ -135,7 +135,7 @@ void tcf_police_destroy(struct tcf_police *p) #ifdef CONFIG_NET_CLS_ACT static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est, - struct tc_action *a, int ovr, int bind) + struct tc_action *a, int ovr, int bind) { unsigned h; int ret = 0, err; @@ -269,7 +269,7 @@ static int tcf_act_police_cleanup(struct tc_action *a, int bind) } 
static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, - struct tcf_result *res) + struct tcf_result *res) { struct tcf_police *police = a->priv; psched_time_t now; @@ -606,12 +606,12 @@ rtattr_failure: int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police) { struct gnet_dump d; - + if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS, police->tcf_stats_lock, &d) < 0) goto errout; - + if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 || #ifdef CONFIG_NET_ESTIMATOR gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 || diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 5fe80854ca9..c7971182af0 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -43,9 +43,9 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result d->tcf_bstats.bytes += skb->len; d->tcf_bstats.packets++; - /* print policy string followed by _ then packet count - * Example if this was the 3rd packet and the string was "hello" - * then it would look like "hello_3" (without quotes) + /* print policy string followed by _ then packet count + * Example if this was the 3rd packet and the string was "hello" + * then it would look like "hello_3" (without quotes) **/ printk("simple: %s_%d\n", (char *)d->tcfd_defdata, d->tcf_bstats.packets); diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index edb8fc97ae1..f41f4ee0587 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -286,7 +286,7 @@ replay: goto errout; } else { switch (n->nlmsg_type) { - case RTM_NEWTFILTER: + case RTM_NEWTFILTER: err = -EEXIST; if (n->nlmsg_flags&NLM_F_EXCL) goto errout; @@ -481,11 +481,11 @@ tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts) int tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb, - struct rtattr *rate_tlv, struct tcf_exts *exts, - struct tcf_ext_map *map) + struct rtattr *rate_tlv, struct tcf_exts *exts, + struct tcf_ext_map *map) { memset(exts, 0, sizeof(*exts)); - + #ifdef CONFIG_NET_CLS_ACT { int err; @@ -511,7 +511,7 @@ tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb, #elif defined CONFIG_NET_CLS_POLICE if (map->police && tb[map->police-1]) { struct tcf_police *p; - + p = tcf_police_locate(tb[map->police-1], rate_tlv); if (p == NULL) return -EINVAL; @@ -530,7 +530,7 @@ tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb, void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, - struct tcf_exts *src) + struct tcf_exts *src) { #ifdef CONFIG_NET_CLS_ACT if (src->action) { @@ -597,7 +597,7 @@ rtattr_failure: __attribute__ ((unused)) int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts, - struct tcf_ext_map *map) + struct tcf_ext_map *map) { #ifdef CONFIG_NET_CLS_ACT if (exts->action) diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index 09fda68c8b3..ea13c2c5b06 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -98,7 +98,7 @@ static void basic_destroy(struct tcf_proto *tp) { struct basic_head *head = (struct basic_head *) xchg(&tp->root, NULL); struct basic_filter *f, *n; - + list_for_each_entry_safe(f, n, &head->flist, link) { list_del(&f->link); basic_delete_filter(tp, f); @@ -157,7 +157,7 @@ errout: } static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle, - struct rtattr **tca, unsigned long *arg) + struct rtattr **tca, unsigned long *arg) { int err = -EINVAL; struct basic_head *head = (struct basic_head *) tp->root; @@ -292,7 +292,7 @@ static int __init init_basic(void) return 
register_tcf_proto_ops(&cls_basic_ops); } -static void __exit exit_basic(void) +static void __exit exit_basic(void) { unregister_tcf_proto_ops(&cls_basic_ops); } diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index c797d6ada7d..2ce3ce5c66e 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -91,7 +91,7 @@ static __inline__ int fw_hash(u32 handle) else if (HTSIZE == 256) { u8 *t = (u8 *) &handle; return t[0] ^ t[1] ^ t[2] ^ t[3]; - } else + } else return handle & (HTSIZE - 1); } @@ -407,7 +407,7 @@ static int __init init_fw(void) return register_tcf_proto_ops(&cls_fw_ops); } -static void __exit exit_fw(void) +static void __exit exit_fw(void) { unregister_tcf_proto_ops(&cls_fw_ops); } diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index 587b9adab38..7853621a04c 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -130,7 +130,7 @@ static struct tcf_ext_map rsvp_ext_map = { else if (r > 0) \ return r; \ } - + static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) { @@ -347,7 +347,7 @@ static int tunnel_bts(struct rsvp_head *data) { int n = data->tgenerator>>5; u32 b = 1<<(data->tgenerator&0x1F); - + if (data->tmap[n]&b) return 0; data->tmap[n] |= b; @@ -547,7 +547,7 @@ insert: s->next = *sp; wmb(); *sp = s; - + goto insert; errout: @@ -654,7 +654,7 @@ static int __init init_rsvp(void) return register_tcf_proto_ops(&RSVP_OPS); } -static void __exit exit_rsvp(void) +static void __exit exit_rsvp(void) { unregister_tcf_proto_ops(&RSVP_OPS); } diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 5af8a59e150..040e2d2d281 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -222,7 +222,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle, err = tcf_exts_validate(tp, tb, est, &e, &tcindex_ext_map); if (err < 0) return err; - + memcpy(&cp, p, sizeof(cp)); memset(&new_filter_result, 0, sizeof(new_filter_result)); @@ -316,12 +316,12 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle, f = kzalloc(sizeof(*f), GFP_KERNEL); if (!f) goto errout_alloc; - } + } if (tb[TCA_TCINDEX_CLASSID-1]) { cr.res.classid = *(u32 *) RTA_DATA(tb[TCA_TCINDEX_CLASSID-1]); tcf_bind_filter(tp, &cr.res, base); - } + } tcf_exts_change(tp, &cr.exts, &e); @@ -341,7 +341,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle, for (fp = p->h+(handle % p->hash); *fp; fp = &(*fp)->next) /* nothing */; *fp = f; - } + } tcf_tree_unlock(tp); return 0; @@ -491,7 +491,7 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh, if (tcf_exts_dump_stats(skb, &r->exts, &tcindex_ext_map) < 0) goto rtattr_failure; } - + return skb->len; rtattr_failure: @@ -519,7 +519,7 @@ static int __init init_tcindex(void) return register_tcf_proto_ops(&cls_tcindex_ops); } -static void __exit exit_tcindex(void) +static void __exit exit_tcindex(void) { unregister_tcf_proto_ops(&cls_tcindex_ops); } diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 8b519480199..a232671cfa4 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -760,7 +760,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh, RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev); #endif #ifdef CONFIG_CLS_U32_PERF - RTA_PUT(skb, TCA_U32_PCNT, + RTA_PUT(skb, TCA_U32_PCNT, sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64), n->pf); #endif @@ -810,7 +810,7 @@ static int __init init_u32(void) return register_tcf_proto_ops(&cls_u32_ops); } -static void __exit exit_u32(void) +static void __exit 
exit_u32(void) { unregister_tcf_proto_ops(&cls_u32_ops); } diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c index 8ed93c39b4e..8d6dacd8190 100644 --- a/net/sched/em_cmp.c +++ b/net/sched/em_cmp.c @@ -88,7 +88,7 @@ static int __init init_em_cmp(void) return tcf_em_register(&em_cmp_ops); } -static void __exit exit_em_cmp(void) +static void __exit exit_em_cmp(void) { tcf_em_unregister(&em_cmp_ops); } diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 45d47d37155..60acf8cdb27 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -9,7 +9,7 @@ * Authors: Thomas Graf <tgraf@suug.ch> * * ========================================================================== - * + * * The metadata ematch compares two meta objects where each object * represents either a meta value stored in the kernel or a static * value provided by userspace. The objects are not provided by @@ -290,7 +290,7 @@ META_COLLECTOR(var_sk_bound_if) dst->len = 3; } else { struct net_device *dev; - + dev = dev_get_by_index(skb->sk->sk_bound_dev_if); *err = var_dev(dev, dst); if (dev) @@ -671,7 +671,7 @@ static inline struct meta_type_ops * meta_type_ops(struct meta_value *v) * Core **************************************************************************/ -static inline int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info, +static inline int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info, struct meta_value *v, struct meta_obj *dst) { int err = 0; @@ -753,7 +753,7 @@ static int em_meta_change(struct tcf_proto *tp, void *data, int len, struct rtattr *tb[TCA_EM_META_MAX]; struct tcf_meta_hdr *hdr; struct meta_match *meta = NULL; - + if (rtattr_parse(tb, TCA_EM_META_MAX, data, len) < 0) goto errout; @@ -822,7 +822,7 @@ static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em) rtattr_failure: return -1; -} +} static struct tcf_ematch_ops em_meta_ops = { .kind = TCF_EM_META, @@ -839,7 +839,7 @@ static int __init init_em_meta(void) return tcf_em_register(&em_meta_ops); } -static void __exit exit_em_meta(void) +static void __exit exit_em_meta(void) { tcf_em_unregister(&em_meta_ops); } diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c index 005db409be6..42103b2bdc5 100644 --- a/net/sched/em_nbyte.c +++ b/net/sched/em_nbyte.c @@ -23,7 +23,7 @@ struct nbyte_data struct tcf_em_nbyte hdr; char pattern[0]; }; - + static int em_nbyte_change(struct tcf_proto *tp, void *data, int data_len, struct tcf_ematch *em) { @@ -68,7 +68,7 @@ static int __init init_em_nbyte(void) return tcf_em_register(&em_nbyte_ops); } -static void __exit exit_em_nbyte(void) +static void __exit exit_em_nbyte(void) { tcf_em_unregister(&em_nbyte_ops); } diff --git a/net/sched/em_text.c b/net/sched/em_text.c index aa17d8f7c4c..8ad894b58fc 100644 --- a/net/sched/em_text.c +++ b/net/sched/em_text.c @@ -125,7 +125,7 @@ static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m) rtattr_failure: return -1; -} +} static struct tcf_ematch_ops em_text_ops = { .kind = TCF_EM_TEXT, @@ -142,7 +142,7 @@ static int __init init_em_text(void) return tcf_em_register(&em_text_ops); } -static void __exit exit_em_text(void) +static void __exit exit_em_text(void) { tcf_em_unregister(&em_text_ops); } diff --git a/net/sched/em_u32.c b/net/sched/em_u32.c index e3ddfce0ac8..cd0600c6796 100644 --- a/net/sched/em_u32.c +++ b/net/sched/em_u32.c @@ -23,7 +23,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em, { struct tc_u32_key *key = (struct tc_u32_key *) em->data; unsigned char *ptr = skb->nh.raw; - + if (info) { if 
(info->ptr) ptr = info->ptr; @@ -34,7 +34,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em, if (!tcf_valid_offset(skb, ptr, sizeof(u32))) return 0; - + return !(((*(u32*) ptr) ^ key->val) & key->mask); } @@ -51,7 +51,7 @@ static int __init init_em_u32(void) return tcf_em_register(&em_u32_ops); } -static void __exit exit_em_u32(void) +static void __exit exit_em_u32(void) { tcf_em_unregister(&em_u32_ops); } diff --git a/net/sched/ematch.c b/net/sched/ematch.c index 8f8a16da72a..d3ad36b3612 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c @@ -37,12 +37,12 @@ * --------<-POP--------- * * where B is a virtual ematch referencing to sequence starting with B1. - * + * * ========================================================================== * * How to write an ematch in 60 seconds * ------------------------------------ - * + * * 1) Provide a matcher function: * static int my_match(struct sk_buff *skb, struct tcf_ematch *m, * struct tcf_pkt_info *info) @@ -115,7 +115,7 @@ static inline struct tcf_ematch_ops * tcf_em_lookup(u16 kind) /** * tcf_em_register - register an extended match - * + * * @ops: ematch operations lookup table * * This function must be called by ematches to announce their presence. @@ -211,7 +211,7 @@ static int tcf_em_validate(struct tcf_proto *tp, if (ref <= idx) goto errout; - + em->data = ref; } else { /* Note: This lookup will increase the module refcnt @@ -327,7 +327,7 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct rtattr *rta, /* We do not use rtattr_parse_nested here because the maximum * number of attributes is unknown. This saves us the allocation * for a tb buffer which would serve no purpose at all. - * + * * The array of rt attributes is parsed in the order as they are * provided, their type must be incremental from 1 to n. Even * if it does not serve any real purpose, a failure of sticking @@ -399,7 +399,7 @@ void tcf_em_tree_destroy(struct tcf_proto *tp, struct tcf_ematch_tree *tree) module_put(em->ops->owner); } } - + tree->hdr.nmatches = 0; kfree(tree->matches); } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 65825f4409d..60b92fcdc8b 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -389,7 +389,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, struct Qdisc *q = *old; - if (parent == NULL) { + if (parent == NULL) { if (q && q->flags&TCQ_F_INGRESS) { *old = dev_graft_qdisc(dev, q); } else { @@ -596,7 +596,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) q = qdisc_leaf(p, clid); } else { /* ingress */ q = dev->qdisc_ingress; - } + } } else { q = dev->qdisc_sleeping; } @@ -743,7 +743,7 @@ create_n_graft: return -ENOENT; if (clid == TC_H_INGRESS) q = qdisc_create(dev, tcm->tcm_parent, tca, &err); - else + else q = qdisc_create(dev, tcm->tcm_handle, tca, &err); if (q == NULL) { if (err == -EAGAIN) @@ -808,10 +808,10 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, #endif gnet_stats_copy_queue(&d, &q->qstats) < 0) goto rtattr_failure; - + if (gnet_stats_finish_copy(&d) < 0) goto rtattr_failure; - + nlh->nlmsg_len = skb->tail - b; return skb->len; @@ -954,7 +954,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) } /* OK. 
Locate qdisc */ - if ((q = qdisc_lookup(dev, qid)) == NULL) + if ((q = qdisc_lookup(dev, qid)) == NULL) return -ENOENT; /* An check that it supports classes */ @@ -978,7 +978,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) goto out; } else { switch (n->nlmsg_type) { - case RTM_NEWTCLASS: + case RTM_NEWTCLASS: err = -EEXIST; if (n->nlmsg_flags&NLM_F_EXCL) goto out; @@ -1162,7 +1162,7 @@ reclassify: skb->tc_verd = SET_TC_VERD(skb->tc_verd,verd); goto reclassify; } else { - if (skb->tc_verd) + if (skb->tc_verd) skb->tc_verd = SET_TC_VERD(skb->tc_verd,0); return err; } @@ -1200,7 +1200,7 @@ static struct file_operations psched_fops = { .read = seq_read, .llseek = seq_lseek, .release = single_release, -}; +}; #endif #ifdef CONFIG_NET_SCH_CLK_CPU diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index edc7bb0b9c8..afb3bbd571f 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -107,7 +107,7 @@ static __inline__ struct atm_flow_data *lookup_flow(struct Qdisc *sch, struct atm_qdisc_data *p = PRIV(sch); struct atm_flow_data *flow; - for (flow = p->flows; flow; flow = flow->next) + for (flow = p->flows; flow; flow = flow->next) if (flow->classid == classid) break; return flow; } @@ -125,7 +125,7 @@ static int atm_tc_graft(struct Qdisc *sch,unsigned long arg, if (!new) new = &noop_qdisc; *old = xchg(&flow->q,new); if (*old) qdisc_reset(*old); - return 0; + return 0; } @@ -145,7 +145,7 @@ static unsigned long atm_tc_get(struct Qdisc *sch,u32 classid) DPRINTK("atm_tc_get(sch %p,[qdisc %p],classid %x)\n",sch,p,classid); flow = lookup_flow(sch,classid); - if (flow) flow->ref++; + if (flow) flow->ref++; DPRINTK("atm_tc_get: flow %p\n",flow); return (unsigned long) flow; } @@ -280,9 +280,9 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, opt->rta_type,RTA_PAYLOAD(opt),hdr_len); if (!(sock = sockfd_lookup(fd,&error))) return error; /* f_count++ */ DPRINTK("atm_tc_change: f_count %d\n",file_count(sock->file)); - if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) { + if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) { error = -EPROTOTYPE; - goto err_out; + goto err_out; } /* @@@ should check if the socket is really operational or we'll crash on vcc->send */ @@ -320,9 +320,9 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, flow->q = &noop_qdisc; DPRINTK("atm_tc_change: qdisc %p\n",flow->q); flow->sock = sock; - flow->vcc = ATM_SD(sock); /* speedup */ + flow->vcc = ATM_SD(sock); /* speedup */ flow->vcc->user_back = flow; - DPRINTK("atm_tc_change: vcc %p\n",flow->vcc); + DPRINTK("atm_tc_change: vcc %p\n",flow->vcc); flow->old_pop = flow->vcc->pop; flow->parent = p; flow->vcc->pop = sch_atm_pop; @@ -391,7 +391,7 @@ static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch,unsigned long cl) struct atm_flow_data *flow = (struct atm_flow_data *) cl; DPRINTK("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n",sch,p,flow); - return flow ? &flow->filter_list : &p->link.filter_list; + return flow ? 
&flow->filter_list : &p->link.filter_list; } @@ -546,8 +546,8 @@ static int atm_tc_requeue(struct sk_buff *skb,struct Qdisc *sch) D2PRINTK("atm_tc_requeue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p); ret = p->link.q->ops->requeue(skb,p->link.q); if (!ret) { - sch->q.qlen++; - sch->qstats.requeues++; + sch->q.qlen++; + sch->qstats.requeues++; } else { sch->qstats.drops++; p->link.qstats.drops++; @@ -726,7 +726,7 @@ static int __init atm_init(void) return register_qdisc(&atm_qdisc_ops); } -static void __exit atm_exit(void) +static void __exit atm_exit(void) { unregister_qdisc(&atm_qdisc_ops); } diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index f79a4f3d0a9..48830cac101 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -40,12 +40,12 @@ ======================================= Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource - Management Models for Packet Networks", + Management Models for Packet Networks", IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995 - [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995 + [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995 - [3] Sally Floyd, "Notes on Class-Based Queueing: Setting + [3] Sally Floyd, "Notes on Class-Based Queueing: Setting Parameters", 1996 [4] Sally Floyd and Michael Speer, "Experimental Results @@ -59,12 +59,12 @@ the implementation is different. Particularly: --- The WRR algorithm is different. Our version looks more - reasonable (I hope) and works when quanta are allowed to be - less than MTU, which is always the case when real time classes - have small rates. Note, that the statement of [3] is - incomplete, delay may actually be estimated even if class - per-round allotment is less than MTU. Namely, if per-round - allotment is W*r_i, and r_1+...+r_k = r < 1 + reasonable (I hope) and works when quanta are allowed to be + less than MTU, which is always the case when real time classes + have small rates. Note, that the statement of [3] is + incomplete, delay may actually be estimated even if class + per-round allotment is less than MTU. Namely, if per-round + allotment is W*r_i, and r_1+...+r_k = r < 1 delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B @@ -280,7 +280,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) #ifdef CONFIG_NET_CLS_ACT switch (result) { case TC_ACT_QUEUED: - case TC_ACT_STOLEN: + case TC_ACT_STOLEN: *qerr = NET_XMIT_SUCCESS; case TC_ACT_SHOT: return NULL; @@ -479,7 +479,7 @@ static void cbq_ovl_classic(struct cbq_class *cl) if (!cl->delayed) { delay += cl->offtime; - /* + /* Class goes to sleep, so that it will have no chance to work avgidle. Let's forgive it 8) @@ -717,7 +717,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) } #endif -/* +/* It is mission critical procedure. We "regenerate" toplevel cutoff, if transmitting class @@ -739,7 +739,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, } } while ((borrowed=borrowed->borrow) != NULL); } -#if 0 +#if 0 /* It is not necessary now. Uncommenting it will save CPU cycles, but decrease fairness. */ @@ -768,7 +768,7 @@ cbq_update(struct cbq_sched_data *q) (now - last) is total time between packet right edges. 
(last_pktlen/rate) is "virtual" busy time, so that - idle = (now - last) - last_pktlen/rate + idle = (now - last) - last_pktlen/rate */ idle = PSCHED_TDIFF(q->now, cl->last); @@ -907,7 +907,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio) skb = cl->q->dequeue(cl->q); /* Class did not give us any skb :-( - It could occur even if cl->q->q.qlen != 0 + It could occur even if cl->q->q.qlen != 0 f.e. if cl->q == "tbf" */ if (skb == NULL) @@ -2131,7 +2131,7 @@ static int __init cbq_module_init(void) { return register_qdisc(&cbq_qdisc_ops); } -static void __exit cbq_module_exit(void) +static void __exit cbq_module_exit(void) { unregister_qdisc(&cbq_qdisc_ops); } diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index d5421816f00..96324cf4e6a 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -68,7 +68,7 @@ static inline int dsmark_valid_indices(u16 indices) return 0; indices >>= 1; } - + return 1; } @@ -100,7 +100,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg, qdisc_reset(*old); sch_tree_unlock(sch); - return 0; + return 0; } static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg) @@ -151,7 +151,7 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent, if (tb[TCA_DSMARK_VALUE-1]) p->value[*arg-1] = RTA_GET_U8(tb[TCA_DSMARK_VALUE-1]); - + if (tb[TCA_DSMARK_MASK-1]) p->mask[*arg-1] = mask; @@ -167,7 +167,7 @@ static int dsmark_delete(struct Qdisc *sch, unsigned long arg) if (!dsmark_valid_index(p, arg)) return -EINVAL; - + p->mask[arg-1] = 0xff; p->value[arg-1] = 0; @@ -193,9 +193,9 @@ static void dsmark_walk(struct Qdisc *sch,struct qdisc_walker *walker) break; } } -ignore: +ignore: walker->count++; - } + } } static struct tcf_proto **dsmark_find_tcf(struct Qdisc *sch,unsigned long cl) @@ -338,7 +338,7 @@ static unsigned int dsmark_drop(struct Qdisc *sch) { struct dsmark_qdisc_data *p = PRIV(sch); unsigned int len; - + DPRINTK("dsmark_reset(sch %p,[qdisc %p])\n", sch, p); if (p->q->ops->drop == NULL) @@ -506,7 +506,7 @@ static int __init dsmark_module_init(void) return register_qdisc(&dsmark_qdisc_ops); } -static void __exit dsmark_module_exit(void) +static void __exit dsmark_module_exit(void) { unregister_qdisc(&dsmark_qdisc_ops); } diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 3b6e6a78092..52eb3439d7c 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -36,7 +36,7 @@ /* Main transmission queue. */ -/* Main qdisc structure lock. +/* Main qdisc structure lock. However, modifications to data, participating in scheduling must be additionally @@ -66,7 +66,7 @@ void qdisc_unlock_tree(struct net_device *dev) write_unlock(&qdisc_tree_lock); } -/* +/* dev->queue_lock serializes queue accesses for this device AND dev->qdisc pointer itself. @@ -82,7 +82,7 @@ void qdisc_unlock_tree(struct net_device *dev) we do not check dev->tbusy flag here. Returns: 0 - queue is empty. - >0 - queue is not empty, but throttled. + >0 - queue is not empty, but throttled. <0 - queue is not empty. Device is throttled, if dev->tbusy != 0. NOTE: Called under dev->queue_lock with locally disabled BH. @@ -112,7 +112,7 @@ static inline int qdisc_restart(struct net_device *dev) if (!netif_tx_trylock(dev)) { collision: /* So, someone grabbed the driver. */ - + /* It may be transient configuration error, when hard_start_xmit() recurses. 
We detect it by checking xmit owner and drop the @@ -128,7 +128,7 @@ static inline int qdisc_restart(struct net_device *dev) goto requeue; } } - + { /* And release queue */ spin_unlock(&dev->queue_lock); @@ -137,7 +137,7 @@ static inline int qdisc_restart(struct net_device *dev) int ret; ret = dev_hard_start_xmit(skb, dev); - if (ret == NETDEV_TX_OK) { + if (ret == NETDEV_TX_OK) { if (!nolock) { netif_tx_unlock(dev); } @@ -146,15 +146,15 @@ static inline int qdisc_restart(struct net_device *dev) } if (ret == NETDEV_TX_LOCKED && nolock) { spin_lock(&dev->queue_lock); - goto collision; + goto collision; } } /* NETDEV_TX_BUSY - we need to requeue */ /* Release the driver */ - if (!nolock) { + if (!nolock) { netif_tx_unlock(dev); - } + } spin_lock(&dev->queue_lock); q = dev->qdisc; } @@ -300,7 +300,7 @@ struct Qdisc noop_qdisc = { .enqueue = noop_enqueue, .dequeue = noop_dequeue, .flags = TCQ_F_BUILTIN, - .ops = &noop_qdisc_ops, + .ops = &noop_qdisc_ops, .list = LIST_HEAD_INIT(noop_qdisc.list), }; @@ -454,7 +454,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops, unsigned int parentid) { struct Qdisc *sch; - + sch = qdisc_alloc(dev, ops); if (IS_ERR(sch)) goto errout; @@ -478,7 +478,7 @@ void qdisc_reset(struct Qdisc *qdisc) ops->reset(qdisc); } -/* this is the rcu callback function to clean up a qdisc when there +/* this is the rcu callback function to clean up a qdisc when there * are no further references to it */ static void __qdisc_destroy(struct rcu_head *head) @@ -600,10 +600,10 @@ void dev_shutdown(struct net_device *dev) dev->qdisc_sleeping = &noop_qdisc; qdisc_destroy(qdisc); #if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE) - if ((qdisc = dev->qdisc_ingress) != NULL) { + if ((qdisc = dev->qdisc_ingress) != NULL) { dev->qdisc_ingress = NULL; qdisc_destroy(qdisc); - } + } #endif BUG_TRAP(!timer_pending(&dev->watchdog_timer)); qdisc_unlock_tree(dev); diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 18e81a8ffb0..fa1b4fe7a5f 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -335,7 +335,7 @@ static void gred_reset(struct Qdisc* sch) qdisc_reset_queue(sch); - for (i = 0; i < t->DPs; i++) { + for (i = 0; i < t->DPs; i++) { struct gred_sched_data *q = t->tab[i]; if (!q) @@ -393,7 +393,7 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps) "shadowed VQ 0x%x\n", i); gred_destroy_vq(table->tab[i]); table->tab[i] = NULL; - } + } } return 0; diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 6eefa699577..135087d4213 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -284,7 +284,7 @@ static inline struct hfsc_class * eltree_get_minel(struct hfsc_sched *q) { struct rb_node *n; - + n = rb_first(&q->eligible); if (n == NULL) return NULL; @@ -773,7 +773,7 @@ init_vf(struct hfsc_class *cl, unsigned int len) /* update the virtual curve */ vt = cl->cl_vt + cl->cl_vtoff; rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt, - cl->cl_total); + cl->cl_total); if (cl->cl_virtual.x == vt) { cl->cl_virtual.x -= cl->cl_vtoff; cl->cl_vtoff = 0; @@ -796,10 +796,10 @@ init_vf(struct hfsc_class *cl, unsigned int len) /* update the ulimit curve */ rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time, - cl->cl_total); + cl->cl_total); /* compute myf */ cl->cl_myf = rtsc_y2x(&cl->cl_ulimit, - cl->cl_total); + cl->cl_total); cl->cl_myfadj = 0; } } @@ -853,7 +853,7 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time) * update vt and f */ cl->cl_vt = rtsc_y2x(&cl->cl_virtual, 
cl->cl_total) - - cl->cl_vtoff + cl->cl_vtadj; + - cl->cl_vtoff + cl->cl_vtadj; /* * if vt of the class is smaller than cvtmin, @@ -870,7 +870,7 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time) if (cl->cl_flags & HFSC_USC) { cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit, - cl->cl_total); + cl->cl_total); #if 0 /* * This code causes classes to stay way under their @@ -1001,7 +1001,7 @@ hfsc_find_class(u32 classid, struct Qdisc *sch) static void hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc, - u64 cur_time) + u64 cur_time) { sc2isc(rsc, &cl->cl_rsc); rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul); @@ -1023,7 +1023,7 @@ hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc) static void hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc, - u64 cur_time) + u64 cur_time) { sc2isc(usc, &cl->cl_usc); rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total); @@ -1032,7 +1032,7 @@ hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc, static int hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, - struct rtattr **tca, unsigned long *arg) + struct rtattr **tca, unsigned long *arg) { struct hfsc_sched *q = qdisc_priv(sch); struct hfsc_class *cl = (struct hfsc_class *)*arg; @@ -1228,9 +1228,9 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) #ifdef CONFIG_NET_CLS_ACT switch (result) { case TC_ACT_QUEUED: - case TC_ACT_STOLEN: + case TC_ACT_STOLEN: *qerr = NET_XMIT_SUCCESS; - case TC_ACT_SHOT: + case TC_ACT_SHOT: return NULL; } #elif defined(CONFIG_NET_CLS_POLICE) @@ -1259,7 +1259,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) static int hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, - struct Qdisc **old) + struct Qdisc **old) { struct hfsc_class *cl = (struct hfsc_class *)arg; @@ -1397,7 +1397,7 @@ hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl) static int hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb, - struct tcmsg *tcm) + struct tcmsg *tcm) { struct hfsc_class *cl = (struct hfsc_class *)arg; unsigned char *b = skb->tail; diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 15f23c5511a..1f098d862f9 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -11,7 +11,7 @@ * Credits (in time order) for older HTB versions: * Stef Coene <stef.coene@docum.org> * HTB support at LARTC mailing list - * Ondrej Kraus, <krauso@barr.cz> + * Ondrej Kraus, <krauso@barr.cz> * found missing INIT_QDISC(htb) * Vladimir Smelhaus, Aamer Akhter, Bert Hubert * helped a lot to locate nasty class stall bug @@ -59,11 +59,11 @@ Author: devik@cdi.cz ======================================================================== HTB is like TBF with multiple classes. It is also similar to CBQ because - it allows to assign priority to each class in hierarchy. + it allows to assign priority to each class in hierarchy. In fact it is another implementation of Floyd's formal sharing. Levels: - Each class is assigned level. Leaf has ALWAYS level 0 and root + Each class is assigned level. Leaf has ALWAYS level 0 and root classes have level TC_HTB_MAXDEPTH-1. Interior nodes has level one less than their parent. */ @@ -245,7 +245,7 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch) * We allow direct class selection by classid in priority. The we examine * filters in qdisc and in inner nodes (if higher filter points to the inner * node). 
If we end up with classid MAJOR:0 we enqueue the skb into special - * internal fifo (direct). These packets then go directly thru. If we still + * internal fifo (direct). These packets then go directly thru. If we still * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull * then finish and return direct queue. */ @@ -433,7 +433,7 @@ static inline void htb_remove_class_from_row(struct htb_sched *q, * htb_activate_prios - creates active classe's feed chain * * The class is connected to ancestors and/or appropriate rows - * for priorities it is participating on. cl->cmode must be new + * for priorities it is participating on. cl->cmode must be new * (activated) mode. It does nothing if cl->prio_activity == 0. */ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl) @@ -466,7 +466,7 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl) /** * htb_deactivate_prios - remove class from feed chain * - * cl->cmode must represent old mode (before deactivation). It does + * cl->cmode must represent old mode (before deactivation). It does * nothing if cl->prio_activity == 0. Class is removed from all feed * chains and rows. */ @@ -524,9 +524,9 @@ static inline long htb_hiwater(const struct htb_class *cl) * * It computes cl's mode at time cl->t_c+diff and returns it. If mode * is not HTB_CAN_SEND then cl->pq_key is updated to time difference - * from now to time when cl will change its state. + * from now to time when cl will change its state. * Also it is worth to note that class mode doesn't change simply - * at cl->{c,}tokens == 0 but there can rather be hysteresis of + * at cl->{c,}tokens == 0 but there can rather be hysteresis of * 0 .. -cl->{c,}buffer range. It is meant to limit number of * mode transitions per time unit. The speed gain is about 1/6. */ @@ -575,7 +575,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff) } /** - * htb_activate - inserts leaf cl into appropriate active feeds + * htb_activate - inserts leaf cl into appropriate active feeds * * Routine learns (new) priority of leaf and activates feed chain * for the prio. It can be called on already active leaf safely. @@ -594,7 +594,7 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) } /** - * htb_deactivate - remove leaf cl from active feeds + * htb_deactivate - remove leaf cl from active feeds * * Make sure that leaf is active. In the other words it can't be called * with non-active leaf. It also removes class from the drop list. 
@@ -854,7 +854,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio, for (i = 0; i < 65535; i++) { if (!*sp->pptr && *sp->pid) { - /* ptr was invalidated but id is valid - try to recover + /* ptr was invalidated but id is valid - try to recover the original or next ptr */ *sp->pptr = htb_id_find_next_upper(prio, sp->root, *sp->pid); @@ -906,7 +906,7 @@ next: /* class can be empty - it is unlikely but can be true if leaf qdisc drops packets in enqueue routine or if someone used - graft operation on the leaf since last dequeue; + graft operation on the leaf since last dequeue; simply deactivate and skip such class */ if (unlikely(cl->un.leaf.q->q.qlen == 0)) { struct htb_class *next; @@ -1229,7 +1229,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, if (cl && !cl->level) { if (new == NULL && (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, - cl->classid)) + cl->classid)) == NULL) return -ENOBUFS; sch_tree_lock(sch); @@ -1347,7 +1347,7 @@ static void htb_destroy(struct Qdisc *sch) del_timer_sync(&q->rttim); #endif /* This line used to be after htb_destroy_class call below - and surprisingly it worked in 2.4. But it must precede it + and surprisingly it worked in 2.4. But it must precede it because filter need its target class alive to be able to call unbind_filter on it (without Oops). */ htb_destroy_filters(&q->filter_list); diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index c3242f727d4..cfe070ee6ee 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -1,4 +1,4 @@ -/* net/sched/sch_ingress.c - Ingress qdisc +/* net/sched/sch_ingress.c - Ingress qdisc * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version @@ -47,7 +47,7 @@ */ #ifndef CONFIG_NET_CLS_ACT #ifdef CONFIG_NETFILTER -static int nf_registered; +static int nf_registered; #endif #endif @@ -70,7 +70,7 @@ static int ingress_graft(struct Qdisc *sch,unsigned long arg, DPRINTK("ingress_graft(sch %p,[qdisc %p],new %p,old %p)\n", sch, p, new, old); DPRINTK("\n ingress_graft: You cannot add qdiscs to classes"); - return 1; + return 1; } @@ -162,7 +162,7 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch) case TC_ACT_QUEUED: result = TC_ACT_STOLEN; break; - case TC_ACT_RECLASSIFY: + case TC_ACT_RECLASSIFY: case TC_ACT_OK: case TC_ACT_UNSPEC: default: @@ -172,7 +172,7 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch) }; /* backward compat */ #else -#ifdef CONFIG_NET_CLS_POLICE +#ifdef CONFIG_NET_CLS_POLICE switch (result) { case TC_POLICE_SHOT: result = NF_DROP; @@ -232,14 +232,14 @@ static unsigned int ingress_drop(struct Qdisc *sch) #ifdef CONFIG_NETFILTER static unsigned int ing_hook(unsigned int hook, struct sk_buff **pskb, - const struct net_device *indev, - const struct net_device *outdev, - int (*okfn)(struct sk_buff *)) + const struct net_device *indev, + const struct net_device *outdev, + int (*okfn)(struct sk_buff *)) { - + struct Qdisc *q; struct sk_buff *skb = *pskb; - struct net_device *dev = skb->dev; + struct net_device *dev = skb->dev; int fwres=NF_ACCEPT; DPRINTK("ing_hook: skb %s dev=%s len=%u\n", @@ -247,7 +247,7 @@ ing_hook(unsigned int hook, struct sk_buff **pskb, skb->dev ? 
(*pskb)->dev->name : "(no dev)", skb->len); -/* +/* revisit later: Use a private since lock dev->queue_lock is also used on the egress (might slow things for an iota) */ @@ -257,8 +257,8 @@ used on the egress (might slow things for an iota) if ((q = dev->qdisc_ingress) != NULL) fwres = q->enqueue(skb, q); spin_unlock(&dev->queue_lock); - } - + } + return fwres; } @@ -296,7 +296,7 @@ static int ingress_init(struct Qdisc *sch,struct rtattr *opt) printk("Ingress scheduler: Classifier actions prefered over netfilter\n"); #endif #endif - + #ifndef CONFIG_NET_CLS_ACT #ifdef CONFIG_NETFILTER if (!nf_registered) { @@ -417,7 +417,7 @@ static int __init ingress_module_init(void) return ret; } -static void __exit ingress_module_exit(void) +static void __exit ingress_module_exit(void) { unregister_qdisc(&ingress_qdisc_ops); #ifndef CONFIG_NET_CLS_ACT diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 79542af9dab..1ccbfb55b0b 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -7,7 +7,7 @@ * 2 of the License. * * Many of the algorithms and ideas for this came from - * NIST Net which is not copyrighted. + * NIST Net which is not copyrighted. * * Authors: Stephen Hemminger <shemminger@osdl.org> * Catalin(ux aka Dino) BOIE <catab at umbrella dot ro> @@ -114,7 +114,7 @@ static unsigned long get_crandom(struct crndstate *state) * std deviation sigma. Uses table lookup to approximate the desired * distribution, and a uniformly-distributed pseudo-random source. */ -static long tabledist(unsigned long mu, long sigma, +static long tabledist(unsigned long mu, long sigma, struct crndstate *state, const struct disttable *dist) { long t, x; @@ -126,7 +126,7 @@ static long tabledist(unsigned long mu, long sigma, rnd = get_crandom(state); /* default uniform distribution */ - if (dist == NULL) + if (dist == NULL) return (rnd % (2*sigma)) - sigma + mu; t = dist->table[rnd % dist->size]; @@ -218,7 +218,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) ++q->counter; ret = q->qdisc->enqueue(skb, q->qdisc); } else { - /* + /* * Do re-ordering by putting one out of N packets at the front * of the queue. 
*/ @@ -323,7 +323,7 @@ static void netem_reset(struct Qdisc *sch) /* Pass size change message down to embedded FIFO */ static int set_fifo_limit(struct Qdisc *q, int limit) { - struct rtattr *rta; + struct rtattr *rta; int ret = -ENOMEM; /* Hack to avoid sending change message to non-FIFO */ @@ -333,9 +333,9 @@ static int set_fifo_limit(struct Qdisc *q, int limit) rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); if (rta) { rta->rta_type = RTM_NEWQDISC; - rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt)); + rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt)); ((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit; - + ret = q->ops->change(q, rta); kfree(rta); } @@ -364,7 +364,7 @@ static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr) d->size = n; for (i = 0; i < n; i++) d->table[i] = data[i]; - + spin_lock_bh(&sch->dev->queue_lock); d = xchg(&q->delay_dist, d); spin_unlock_bh(&sch->dev->queue_lock); @@ -419,7 +419,7 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt) struct netem_sched_data *q = qdisc_priv(sch); struct tc_netem_qopt *qopt; int ret; - + if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt)) return -EINVAL; @@ -429,7 +429,7 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt) pr_debug("netem: can't set fifo limit\n"); return ret; } - + q->latency = qopt->latency; q->jitter = qopt->jitter; q->limit = qopt->limit; @@ -445,10 +445,10 @@ static int netem_change(struct Qdisc *sch, struct rtattr *opt) /* Handle nested options after initial queue options. * Should have put all options in nested format but too late now. - */ + */ if (RTA_PAYLOAD(opt) > sizeof(*qopt)) { struct rtattr *tb[TCA_NETEM_MAX]; - if (rtattr_parse(tb, TCA_NETEM_MAX, + if (rtattr_parse(tb, TCA_NETEM_MAX, RTA_DATA(opt) + sizeof(*qopt), RTA_PAYLOAD(opt) - sizeof(*qopt))) return -EINVAL; @@ -681,7 +681,7 @@ static void netem_put(struct Qdisc *sch, unsigned long arg) { } -static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid, +static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **tca, unsigned long *arg) { return -ENOSYS; diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 000e043ebd6..9f957ca5073 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -7,7 +7,7 @@ * 2 of the License, or (at your option) any later version. 
* * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> - * Fixes: 19990609: J Hadi Salim <hadi@nortelnetworks.com>: + * Fixes: 19990609: J Hadi Salim <hadi@nortelnetworks.com>: * Init -- EINVAL when opt undefined */ @@ -105,7 +105,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) return NET_XMIT_SUCCESS; } sch->qstats.drops++; - return ret; + return ret; } @@ -453,7 +453,7 @@ static int __init prio_module_init(void) return register_qdisc(&prio_qdisc_ops); } -static void __exit prio_module_exit(void) +static void __exit prio_module_exit(void) { unregister_qdisc(&prio_qdisc_ops); } diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index acddad08850..00db53eb815 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -185,7 +185,7 @@ static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit) TC_H_MAKE(sch->handle, 1)); if (q) { rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), - GFP_KERNEL); + GFP_KERNEL); if (rta) { rta->rta_type = RTM_NEWQDISC; rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt)); diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 82844801e42..66f32051a99 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -53,7 +53,7 @@ Queuing using Deficit Round Robin", Proc. SIGCOMM 95. - This is not the thing that is usually called (W)FQ nowadays. + This is not the thing that is usually called (W)FQ nowadays. It does not use any timestamp mechanism, but instead processes queues in round-robin order. @@ -63,7 +63,7 @@ DRAWBACKS: - - "Stochastic" -> It is not 100% fair. + - "Stochastic" -> It is not 100% fair. When hash collisions occur, several flows are considered as one. - "Round-robin" -> It introduces larger delays than virtual clock @@ -501,7 +501,7 @@ static int __init sfq_module_init(void) { return register_qdisc(&sfq_qdisc_ops); } -static void __exit sfq_module_exit(void) +static void __exit sfq_module_exit(void) { unregister_qdisc(&sfq_qdisc_ops); } diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index ed9b6d93854..85da8daa61d 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -276,7 +276,7 @@ static void tbf_reset(struct Qdisc* sch) static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit) { struct Qdisc *q; - struct rtattr *rta; + struct rtattr *rta; int ret; q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops, @@ -285,7 +285,7 @@ static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit) rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); if (rta) { rta->rta_type = RTM_NEWQDISC; - rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt)); + rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt)); ((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit; ret = q->ops->change(q, rta); @@ -475,7 +475,7 @@ static void tbf_put(struct Qdisc *sch, unsigned long arg) { } -static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid, +static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **tca, unsigned long *arg) { return -ENOSYS; diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 4c16ad57a3e..6a66037abac 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -178,7 +178,7 @@ teql_destroy(struct Qdisc* sch) teql_neigh_release(xchg(&dat->ncache, NULL)); break; } - + } while ((prev = q) != master->slaves); } } @@ -292,7 +292,7 @@ restart: do { struct net_device *slave = q->dev; - + if (slave->qdisc_sleeping != q) continue; if (netif_queue_stopped(slave) || ! 
netif_running(slave)) { @@ -425,7 +425,7 @@ static __init void teql_master_setup(struct net_device *dev) master->dev = dev; ops->priv_size = sizeof(struct teql_sched_data); - + ops->enqueue = teql_enqueue; ops->dequeue = teql_dequeue; ops->requeue = teql_requeue; @@ -489,7 +489,7 @@ static int __init teql_init(void) return i ? 0 : err; } -static void __exit teql_exit(void) +static void __exit teql_exit(void) { struct teql_master *master, *nxt; |