[HTB]: Lindent

The code's indentation was a mess.  Run it through the Lindent
script and clean up the damage.  Also drop the vim magic comment
and substitute inline for __inline__.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Stephen Hemminger
Date:      2006-08-10 23:35:16 -07:00
Committer: David S. Miller
Parent:    18a63e868b
Commit:    87990467d3

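The mechanical part of the change can be reproduced with something like the
commands below. This is a sketch, assuming the usual options baked into
scripts/Lindent (roughly "indent -npro -kr -i8 -ts8 -sob -l80 -ss -ncs"); the
wrapper in a given tree may differ, and the result still needs hand cleanup
(re-wrapping long printk lines, fixing continuation indents, and removing the
vim modeline by hand).

    # Reformat to kernel style with the in-tree Lindent wrapper.
    ./scripts/Lindent net/sched/sch_htb.c

    # Swap the GNU spelling __inline__ for plain inline (GNU sed).
    sed -i 's/\b__inline__\b/inline/g' net/sched/sch_htb.c

Note that indent's -sob (swallow optional blank lines) is why a few stray
blank lines disappear in the hunks below, and -kr (K&R style) is what moves
the struct opening braces up onto the declaration line.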

@@ -1,4 +1,4 @@
-/* vim: ts=8 sw=8
+/*
  * net/sched/sch_htb.c        Hierarchical token bucket, feed tree version
  *
  * This program is free software; you can redistribute it and/or
@@ -86,8 +86,7 @@ enum htb_cmode {
 };
 
 /* interior & leaf nodes; props specific to leaves are marked L: */
-struct htb_class
-{
+struct htb_class {
         /* general class parameters */
         u32 classid;
         struct gnet_stats_basic bstats;
@@ -151,7 +150,7 @@ struct htb_class
 };
 
 /* TODO: maybe compute rate when size is too large .. or drop ? */
-static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
+static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
                            int size)
 {
         int slot = size >> rate->rate.cell_log;
@@ -162,8 +161,7 @@ static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
         return rate->data[slot];
 }
 
-struct htb_sched
-{
+struct htb_sched {
         struct list_head root;  /* root classes list */
         struct list_head hash[HTB_HSIZE];       /* hashed by classid */
         struct list_head drops[TC_HTB_NUMPRIO]; /* active leaves (for drops) */
@@ -208,7 +206,7 @@ struct htb_sched
 };
 
 /* compute hash of size HTB_HSIZE for given handle */
-static __inline__ int htb_hash(u32 h)
+static inline int htb_hash(u32 h)
 {
 #if HTB_HSIZE != 16
 #error "Declare new hash for your HTB_HSIZE"
@@ -219,7 +217,7 @@ static __inline__ int htb_hash(u32 h)
 }
 
 /* find class in global hash table using given handle */
-static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
+static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
 {
         struct htb_sched *q = qdisc_priv(sch);
         struct list_head *p;
@@ -252,7 +250,8 @@ static inline u32 htb_classid(struct htb_class *cl)
         return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
 }
 
-static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
+static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
+                                      int *qerr)
 {
         struct htb_sched *q = qdisc_priv(sch);
         struct htb_class *cl;
@@ -314,7 +313,8 @@ static void htb_add_to_id_tree (struct rb_root *root,
         struct rb_node **p = &root->rb_node, *parent = NULL;
 
         while (*p) {
-                struct htb_class *c; parent = *p;
+                struct htb_class *c;
+                parent = *p;
                 c = rb_entry(parent, struct htb_class, node[prio]);
 
                 if (cl->classid > c->classid)
@@ -347,7 +347,8 @@ static void htb_add_to_wait_tree (struct htb_sched *q,
         q->near_ev_cache[cl->level] = cl->pq_key;
 
         while (*p) {
-                struct htb_class *c; parent = *p;
+                struct htb_class *c;
+                parent = *p;
                 c = rb_entry(parent, struct htb_class, pq_node);
                 if (time_after_eq(cl->pq_key, c->pq_key))
                         p = &parent->rb_right;
@@ -392,7 +393,7 @@ static inline void htb_add_class_to_row(struct htb_sched *q,
  * The class is removed from row at priorities marked in mask.
  * It does nothing if mask == 0.
  */
-static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
+static inline void htb_remove_class_from_row(struct htb_sched *q,
                                                  struct htb_class *cl, int mask)
 {
         int m = 0;
@@ -422,8 +423,8 @@ static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
         long m, mask = cl->prio_activity;
 
         while (cl->cmode == HTB_MAY_BORROW && p && mask) {
-                m = mask; while (m) {
+                m = mask;
+                while (m) {
                         int prio = ffz(~m);
                         m &= ~(1 << prio);
@@ -435,7 +436,8 @@ static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
                         htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
                 }
                 p->prio_activity |= mask;
-                cl = p; p = cl->parent;
+                cl = p;
+                p = cl->parent;
         }
 
         if (cl->cmode == HTB_CAN_SEND && mask)
@@ -454,9 +456,9 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
         struct htb_class *p = cl->parent;
         long m, mask = cl->prio_activity;
 
         while (cl->cmode == HTB_MAY_BORROW && p && mask) {
-                m = mask; mask = 0;
+                m = mask;
+                mask = 0;
                 while (m) {
                         int prio = ffz(~m);
                         m &= ~(1 << prio);
@@ -476,7 +478,8 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
                 }
                 p->prio_activity &= ~mask;
-                cl = p; p = cl->parent;
+                cl = p;
+                p = cl->parent;
         }
 
         if (cl->cmode == HTB_CAN_SEND && mask)
@@ -508,7 +511,7 @@ static inline long htb_hiwater(const struct htb_class *cl)
  * 0 .. -cl->{c,}buffer range. It is meant to limit number of
  * mode transitions per time unit. The speed gain is about 1/6.
  */
-static __inline__ enum htb_cmode
+static inline enum htb_cmode
 htb_class_mode(struct htb_class *cl, long *diff)
 {
         long toks;
@@ -539,7 +542,6 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
 {
         enum htb_cmode new_mode = htb_class_mode(cl, diff);
-
         if (new_mode == cl->cmode)
                 return;
@@ -560,14 +562,15 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
  * for the prio. It can be called on already active leaf safely.
  * It also adds leaf into droplist.
  */
-static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
+static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 {
         BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
 
         if (!cl->prio_activity) {
                 cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
                 htb_activate_prios(q, cl);
-                list_add_tail(&cl->un.leaf.drop_list,q->drops+cl->un.leaf.aprio);
+                list_add_tail(&cl->un.leaf.drop_list,
+                              q->drops + cl->un.leaf.aprio);
         }
 }
@@ -577,8 +580,7 @@ static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
  * Make sure that leaf is active. In the other words it can't be called
  * with non-active leaf. It also removes class from the drop list.
  */
-static __inline__ void
-htb_deactivate(struct htb_sched *q,struct htb_class *cl)
+static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 {
         BUG_TRAP(cl->prio_activity);
@@ -610,17 +612,20 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 kfree_skb(skb);
                 return ret;
 #endif
-        } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
+        } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
+                   NET_XMIT_SUCCESS) {
                 sch->qstats.drops++;
                 cl->qstats.drops++;
                 return NET_XMIT_DROP;
         } else {
-                cl->bstats.packets++; cl->bstats.bytes += skb->len;
+                cl->bstats.packets++;
+                cl->bstats.bytes += skb->len;
                 htb_activate(q, cl);
         }
 
         sch->q.qlen++;
-        sch->bstats.packets++; sch->bstats.bytes += skb->len;
+        sch->bstats.packets++;
+        sch->bstats.bytes += skb->len;
         return NET_XMIT_SUCCESS;
 }
@@ -643,7 +648,8 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
                         sch->qstats.drops++;
                         return NET_XMIT_CN;
                 }
-        } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
+        } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+                   NET_XMIT_SUCCESS) {
                 sch->qstats.drops++;
                 cl->qstats.drops++;
                 return NET_XMIT_DROP;
@@ -716,7 +722,8 @@ static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
         while (cl) {
                 diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
                 if (cl->level >= level) {
-                        if (cl->level == level) cl->xstats.lends++;
+                        if (cl->level == level)
+                                cl->xstats.lends++;
                         HTB_ACCNT(tokens, buffer, rate);
                 } else {
                         cl->xstats.borrows++;
@@ -725,7 +732,8 @@ static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
                         HTB_ACCNT(ctokens, cbuffer, ceil);
                 }
                 cl->t_c = q->now;
-                old_mode = cl->cmode; diff = 0;
+                old_mode = cl->cmode;
+                diff = 0;
                 htb_change_class_mode(q, cl, &diff);
                 if (old_mode != cl->cmode) {
                         if (old_mode != HTB_CAN_SEND)
@@ -733,10 +741,10 @@ static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
                         if (cl->cmode != HTB_CAN_SEND)
                                 htb_add_to_wait_tree(q, cl, diff);
                 }
-
 #ifdef HTB_RATECM
                 /* update rate counters */
-                cl->sum_bytes += bytes; cl->sum_packets++;
+                cl->sum_bytes += bytes;
+                cl->sum_packets++;
 #endif
 
                 /* update byte stats except for leaves which are already updated */
@@ -763,8 +771,10 @@ static long htb_do_events(struct htb_sched *q,int level)
                 struct htb_class *cl;
                 long diff;
                 struct rb_node *p = q->wait_pq[level].rb_node;
-                if (!p) return 0;
-                while (p->rb_left) p = p->rb_left;
+                if (!p)
+                        return 0;
+                while (p->rb_left)
+                        p = p->rb_left;
 
                 cl = rb_entry(p, struct htb_class, pq_node);
                 if (time_after(cl->pq_key, q->jiffies)) {
@@ -783,13 +793,15 @@ static long htb_do_events(struct htb_sched *q,int level)
 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
    is no such one exists. */
-static struct rb_node *
-htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
+static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
+                                              u32 id)
 {
         struct rb_node *r = NULL;
         while (n) {
-                struct htb_class *cl = rb_entry(n,struct htb_class,node[prio]);
-                if (id == cl->classid) return n;
+                struct htb_class *cl =
+                    rb_entry(n, struct htb_class, node[prio]);
+                if (id == cl->classid)
+                        return n;
 
                 if (id > cl->classid) {
                         n = n->rb_right;
@@ -806,8 +818,8 @@ htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
  *
  * Find leaf where current feed pointers points to.
  */
-static struct htb_class *
-htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
+static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
+                                         struct rb_node **pptr, u32 * pid)
 {
         int i;
         struct {
@@ -825,7 +837,8 @@ htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
                 if (!*sp->pptr && *sp->pid) {
                         /* ptr was invalidated but id is valid - try to recover
                            the original or next ptr */
-                        *sp->pptr = htb_id_find_next_upper(prio,sp->root,*sp->pid);
+                        *sp->pptr =
+                            htb_id_find_next_upper(prio, sp->root, *sp->pid);
                 }
                 *sp->pid = 0;   /* ptr is valid now so that remove this hint as it
                                    can become out of date quickly */
@@ -835,7 +848,9 @@ htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
                         *sp->pptr = (*sp->pptr)->rb_left;
                 if (sp > stk) {
                         sp--;
-                        BUG_TRAP(*sp->pptr); if(!*sp->pptr) return NULL;
+                        BUG_TRAP(*sp->pptr);
+                        if (!*sp->pptr)
+                                return NULL;
                         htb_next_rb_node(sp->pptr);
                 }
         } else {
@@ -854,19 +869,21 @@ htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
 /* dequeues packet at given priority and level; call only if
    you are sure that there is active class at prio/level */
-static struct sk_buff *
-htb_dequeue_tree(struct htb_sched *q,int prio,int level)
+static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
+                                        int level)
 {
         struct sk_buff *skb = NULL;
         struct htb_class *cl, *start;
         /* look initial class up in the row */
         start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
-                                     q->ptr[level]+prio,q->last_ptr_id[level]+prio);
+                                     q->ptr[level] + prio,
+                                     q->last_ptr_id[level] + prio);
 
         do {
 next:
                 BUG_TRAP(cl);
-                if (!cl) return NULL;
+                if (!cl)
+                        return NULL;
 
                 /* class can be empty - it is unlikely but can be true if leaf
                    qdisc drops packets in enqueue routine or if someone used
@@ -881,7 +898,8 @@ next:
                         return NULL;
 
                 next = htb_lookup_leaf(q->row[level] + prio,
-                                prio,q->ptr[level]+prio,q->last_ptr_id[level]+prio);
+                                       prio, q->ptr[level] + prio,
+                                       q->last_ptr_id[level] + prio);
 
                 if (cl == start)        /* fix start if we just deleted it */
                         start = next;
@@ -889,15 +907,20 @@ next:
                         goto next;
                 }
 
-                if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL))
+                skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
+                if (likely(skb != NULL))
                         break;
                 if (!cl->warned) {
-                        printk(KERN_WARNING "htb: class %X isn't work conserving ?!\n",cl->classid);
+                        printk(KERN_WARNING
+                               "htb: class %X isn't work conserving ?!\n",
+                               cl->classid);
                         cl->warned = 1;
                 }
                 q->nwc_hit++;
-                htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
-                cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio,
+                htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
+                                  ptr[0]) + prio);
+                cl = htb_lookup_leaf(q->row[level] + prio, prio,
+                                     q->ptr[level] + prio,
                                      q->last_ptr_id[level] + prio);
 
         } while (cl != start);
@@ -905,7 +928,8 @@ next:
         if (likely(skb != NULL)) {
                 if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
                         cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
-                        htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
+                        htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
+                                          ptr[0]) + prio);
                 }
                 /* this used to be after charge_class but this constelation
                    gives us slightly better performance */
@@ -919,7 +943,8 @@ next:
 static void htb_delay_by(struct Qdisc *sch, long delay)
 {
         struct htb_sched *q = qdisc_priv(sch);
-        if (delay <= 0) delay = 1;
+        if (delay <= 0)
+                delay = 1;
         if (unlikely(delay > 5 * HZ)) {
                 if (net_ratelimit())
                         printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
@@ -941,13 +966,15 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
         q->jiffies = jiffies;
 
         /* try to dequeue direct packets as high prio (!) to minimize cpu work */
-        if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
+        skb = __skb_dequeue(&q->direct_queue);
+        if (skb != NULL) {
                 sch->flags &= ~TCQ_F_THROTTLED;
                 sch->q.qlen--;
                 return skb;
         }
 
-        if (!sch->q.qlen) goto fin;
+        if (!sch->q.qlen)
+                goto fin;
         PSCHED_GET_TIME(q->now);
 
         min_delay = LONG_MAX;
@@ -958,7 +985,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
                         long delay;
                         if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
                                 delay = htb_do_events(q, level);
-                                q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
+                                q->near_ev_cache[level] =
+                                    q->jiffies + (delay ? delay : HZ);
                         } else
                                 delay = q->near_ev_cache[level] - q->jiffies;
@@ -1015,7 +1043,8 @@ static void htb_reset(struct Qdisc* sch)
         for (i = 0; i < HTB_HSIZE; i++) {
                 struct list_head *p;
                 list_for_each(p, q->hash + i) {
-                        struct htb_class *cl = list_entry(p,struct htb_class,hlist);
+                        struct htb_class *cl =
+                            list_entry(p, struct htb_class, hlist);
                         if (cl->level)
                                 memset(&cl->un.inner, 0, sizeof(cl->un.inner));
                         else {
@@ -1054,7 +1083,8 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
         }
         gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
         if (gopt->version != HTB_VER >> 16) {
-                printk(KERN_ERR "HTB: need tc/htb version %d (minor is %d), you have %d\n",
+                printk(KERN_ERR
+                       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
                        HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
                 return -EINVAL;
         }
@@ -1132,9 +1162,12 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
         memset(&opt, 0, sizeof(opt));
 
-        opt.rate = cl->rate->rate; opt.buffer = cl->buffer;
-        opt.ceil = cl->ceil->rate; opt.cbuffer = cl->cbuffer;
-        opt.quantum = cl->un.leaf.quantum; opt.prio = cl->un.leaf.prio;
+        opt.rate = cl->rate->rate;
+        opt.buffer = cl->buffer;
+        opt.ceil = cl->ceil->rate;
+        opt.cbuffer = cl->cbuffer;
+        opt.quantum = cl->un.leaf.quantum;
+        opt.prio = cl->un.leaf.prio;
         opt.level = cl->level;
         RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
         rta->rta_len = skb->tail - b;
@@ -1147,8 +1180,7 @@ rtattr_failure:
 }
 
 static int
-htb_dump_class_stats(struct Qdisc *sch, unsigned long arg,
-                     struct gnet_dump *d)
+htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 {
         struct htb_class *cl = (struct htb_class *)arg;
@@ -1177,7 +1209,8 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
         if (cl && !cl->level) {
                 if (new == NULL && (new = qdisc_create_dflt(sch->dev,
                                                             &pfifo_qdisc_ops))
-                    == NULL)
+                    == NULL)
                         return -ENOBUFS;
                 sch_tree_lock(sch);
                 if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
@@ -1304,7 +1337,8 @@ static void htb_put(struct Qdisc *sch, unsigned long arg)
 }
 
 static int htb_change_class(struct Qdisc *sch, u32 classid,
-                            u32 parentid, struct rtattr **tca, unsigned long *arg)
+                            u32 parentid, struct rtattr **tca,
+                            unsigned long *arg)
 {
         int err = -EINVAL;
         struct htb_sched *q = qdisc_priv(sch);
@@ -1326,12 +1360,14 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
         rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
         ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
-        if (!rtab || !ctab) goto failure;
+        if (!rtab || !ctab)
+                goto failure;
 
         if (!cl) {              /* new class */
                 struct Qdisc *new_q;
                 /* check for valid classid */
-                if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch))
+                if (!classid || TC_H_MAJ(classid ^ sch->handle)
+                    || htb_find(classid, sch))
                         goto failure;
 
                 /* check maximal depth */
@@ -1373,7 +1409,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                 /* leaf (we) needs elementary qdisc */
                 cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
 
-                cl->classid = classid; cl->parent = parent;
+                cl->classid = classid;
+                cl->parent = parent;
 
                 /* set class to be in HTB_CAN_SEND state */
                 cl->tokens = hopt->buffer;
@@ -1384,19 +1421,25 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                 /* attach to the hash list and parent's family */
                 list_add_tail(&cl->hlist, q->hash + htb_hash(classid));
-                list_add_tail(&cl->sibling, parent ? &parent->children : &q->root);
-        } else sch_tree_lock(sch);
+                list_add_tail(&cl->sibling,
+                              parent ? &parent->children : &q->root);
+        } else
+                sch_tree_lock(sch);
 
         /* it used to be a nasty bug here, we have to check that node
            is really leaf before changing cl->un.leaf ! */
         if (!cl->level) {
                 cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
                 if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
-                        printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
+                        printk(KERN_WARNING
+                               "HTB: quantum of class %X is small. Consider r2q change.\n",
+                               cl->classid);
                         cl->un.leaf.quantum = 1000;
                 }
                 if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
-                        printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
+                        printk(KERN_WARNING
+                               "HTB: quantum of class %X is big. Consider r2q change.\n",
+                               cl->classid);
                         cl->un.leaf.quantum = 200000;
                 }
                 if (hopt->quantum)
@@ -1407,16 +1450,22 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
         cl->buffer = hopt->buffer;
         cl->cbuffer = hopt->cbuffer;
-        if (cl->rate) qdisc_put_rtab(cl->rate); cl->rate = rtab;
-        if (cl->ceil) qdisc_put_rtab(cl->ceil); cl->ceil = ctab;
+        if (cl->rate)
+                qdisc_put_rtab(cl->rate);
+        cl->rate = rtab;
+        if (cl->ceil)
+                qdisc_put_rtab(cl->ceil);
+        cl->ceil = ctab;
         sch_tree_unlock(sch);
 
         *arg = (unsigned long)cl;
         return 0;
 
 failure:
-        if (rtab) qdisc_put_rtab(rtab);
-        if (ctab) qdisc_put_rtab(ctab);
+        if (rtab)
+                qdisc_put_rtab(rtab);
+        if (ctab)
+                qdisc_put_rtab(ctab);
         return err;
 }
@@ -1473,7 +1522,8 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
         for (i = 0; i < HTB_HSIZE; i++) {
                 struct list_head *p;
                 list_for_each(p, q->hash + i) {
-                        struct htb_class *cl = list_entry(p,struct htb_class,hlist);
+                        struct htb_class *cl =
+                            list_entry(p, struct htb_class, hlist);
                         if (arg->count < arg->skip) {
                                 arg->count++;
                                 continue;
@@ -1527,6 +1577,7 @@ static void __exit htb_module_exit(void)
 {
         unregister_qdisc(&htb_qdisc_ops);
 }
+
 module_init(htb_module_init)
 module_exit(htb_module_exit)
 MODULE_LICENSE("GPL");