Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 87990467 authored by Stephen Hemminger, committed by David S. Miller
Browse files

[HTB]: Lindent



Code was a mess in terms of indentation.  Run through Lindent
script, and cleanup the damage. Also, don't use vim magic
comment, and substitute inline for __inline__.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 18a63e86
Loading
Loading
Loading
Loading
+526 −475
Original line number Diff line number Diff line
/* vim: ts=8 sw=8
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 *		This program is free software; you can redistribute it and/or
@@ -86,8 +86,7 @@ enum htb_cmode {
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class
{
struct htb_class {
	/* general class parameters */
	u32 classid;
	struct gnet_stats_basic bstats;
@@ -151,7 +150,7 @@ struct htb_class
};

/* TODO: maybe compute rate when size is too large .. or drop ? */
static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
			   int size)
{
	int slot = size >> rate->rate.cell_log;
@@ -162,8 +161,7 @@ static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
	return rate->data[slot];
}

struct htb_sched
{
struct htb_sched {
	struct list_head root;	/* root classes list */
	struct list_head hash[HTB_HSIZE];	/* hashed by classid */
	struct list_head drops[TC_HTB_NUMPRIO];	/* active leaves (for drops) */
@@ -208,7 +206,7 @@ struct htb_sched
};

/* compute hash of size HTB_HSIZE for given handle */
static __inline__ int htb_hash(u32 h) 
static inline int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
#error "Declare new hash for your HTB_HSIZE"
@@ -219,7 +217,7 @@ static __inline__ int htb_hash(u32 h)
}

/* find class in global hash table using given handle */
static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct list_head *p;
@@ -252,7 +250,8 @@ static inline u32 htb_classid(struct htb_class *cl)
	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
@@ -314,7 +313,8 @@ static void htb_add_to_id_tree (struct rb_root *root,
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c; parent = *p;
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->classid > c->classid)
@@ -347,7 +347,8 @@ static void htb_add_to_wait_tree (struct htb_sched *q,
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c; parent = *p;
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (time_after_eq(cl->pq_key, c->pq_key))
			p = &parent->rb_right;
@@ -392,7 +393,7 @@ static inline void htb_add_class_to_row(struct htb_sched *q,
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
static inline void htb_remove_class_from_row(struct htb_sched *q,
						 struct htb_class *cl, int mask)
{
	int m = 0;
@@ -422,8 +423,8 @@ static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {

		m = mask; while (m) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

@@ -435,7 +436,8 @@ static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p; p = cl->parent;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
@@ -454,9 +456,9 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;


	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask; mask = 0; 
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);
@@ -476,7 +478,8 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
		}

		p->prio_activity &= ~mask;
		cl = p; p = cl->parent;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
@@ -508,7 +511,7 @@ static inline long htb_hiwater(const struct htb_class *cl)
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static __inline__ enum htb_cmode 
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
	long toks;
@@ -539,7 +542,6 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);


	if (new_mode == cl->cmode)
		return;

@@ -560,14 +562,15 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,q->drops+cl->un.leaf.aprio);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->un.leaf.aprio);
	}
}

@@ -577,8 +580,7 @@ static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
 * Make sure that leaf is active. In the other words it can't be called
 * with non-active leaf. It also removes class from the drop list.
 */
static __inline__ void 
htb_deactivate(struct htb_sched *q,struct htb_class *cl)
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(cl->prio_activity);

@@ -610,17 +612,20 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
		kfree_skb(skb);
		return ret;
#endif
    } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else {
	cl->bstats.packets++; cl->bstats.bytes += skb->len;
		cl->bstats.packets++;
		cl->bstats.bytes += skb->len;
		htb_activate(q, cl);
	}

	sch->q.qlen++;
    sch->bstats.packets++; sch->bstats.bytes += skb->len;
	sch->bstats.packets++;
	sch->bstats.bytes += skb->len;
	return NET_XMIT_SUCCESS;
}

@@ -643,7 +648,8 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
			sch->qstats.drops++;
			return NET_XMIT_CN;
		}
    } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
@@ -716,7 +722,8 @@ static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
	while (cl) {
		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level) cl->xstats.lends++;
			if (cl->level == level)
				cl->xstats.lends++;
			HTB_ACCNT(tokens, buffer, rate);
		} else {
			cl->xstats.borrows++;
@@ -725,7 +732,8 @@ static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
		HTB_ACCNT(ctokens, cbuffer, ceil);
		cl->t_c = q->now;

		old_mode = cl->cmode; diff = 0;
		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
@@ -733,10 +741,10 @@ static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}
		
#ifdef HTB_RATECM
		/* update rate counters */
		cl->sum_bytes += bytes; cl->sum_packets++;
		cl->sum_bytes += bytes;
		cl->sum_packets++;
#endif

		/* update byte stats except for leaves which are already updated */
@@ -763,8 +771,10 @@ static long htb_do_events(struct htb_sched *q,int level)
		struct htb_class *cl;
		long diff;
		struct rb_node *p = q->wait_pq[level].rb_node;
		if (!p) return 0;
		while (p->rb_left) p = p->rb_left;
		if (!p)
			return 0;
		while (p->rb_left)
			p = p->rb_left;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (time_after(cl->pq_key, q->jiffies)) {
@@ -783,13 +793,15 @@ static long htb_do_events(struct htb_sched *q,int level)

/* Returns class->node+prio from id-tree where classe's id is >= id. NULL
   is no such one exists. */
static struct rb_node *
htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl = rb_entry(n,struct htb_class,node[prio]);
		if (id == cl->classid) return n;
		struct htb_class *cl =
		    rb_entry(n, struct htb_class, node[prio]);
		if (id == cl->classid)
			return n;

		if (id > cl->classid) {
			n = n->rb_right;
@@ -806,8 +818,8 @@ htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
 *
 * Find leaf where current feed pointers points to.
 */
static struct htb_class *
htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 * pid)
{
	int i;
	struct {
@@ -825,7 +837,8 @@ htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover 
			   the original or next ptr */
			*sp->pptr = htb_id_find_next_upper(prio,sp->root,*sp->pid);
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so that remove this hint as it
				   can become out of date quickly */
@@ -835,7 +848,9 @@ htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				BUG_TRAP(*sp->pptr); if(!*sp->pptr) return NULL;
				BUG_TRAP(*sp->pptr);
				if (!*sp->pptr)
					return NULL;
				htb_next_rb_node(sp->pptr);
			}
		} else {
@@ -854,19 +869,21 @@ htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)

/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */
static struct sk_buff *
htb_dequeue_tree(struct htb_sched *q,int prio,int level)
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
					int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
			q->ptr[level]+prio,q->last_ptr_id[level]+prio);
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	do {
next:
		BUG_TRAP(cl);
		if (!cl) return NULL;
		if (!cl)
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		   qdisc drops packets in enqueue routine or if someone used
@@ -881,7 +898,8 @@ htb_dequeue_tree(struct htb_sched *q,int prio,int level)
				return NULL;

			next = htb_lookup_leaf(q->row[level] + prio,
					prio,q->ptr[level]+prio,q->last_ptr_id[level]+prio);
					       prio, q->ptr[level] + prio,
					       q->last_ptr_id[level] + prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
@@ -889,15 +907,20 @@ htb_dequeue_tree(struct htb_sched *q,int prio,int level)
			goto next;
		}

		if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL)) 
		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;
		if (!cl->warned) {
			printk(KERN_WARNING "htb: class %X isn't work conserving ?!\n",cl->classid);
			printk(KERN_WARNING
			       "htb: class %X isn't work conserving ?!\n",
			       cl->classid);
			cl->warned = 1;
		}
		q->nwc_hit++;
		htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
		cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio,
		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
				  ptr[0]) + prio);
		cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	} while (cl != start);
@@ -905,7 +928,8 @@ htb_dequeue_tree(struct htb_sched *q,int prio,int level)
	if (likely(skb != NULL)) {
		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
			htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
					  ptr[0]) + prio);
		}
		/* this used to be after charge_class but this constelation
		   gives us slightly better performance */
@@ -919,7 +943,8 @@ htb_dequeue_tree(struct htb_sched *q,int prio,int level)
static void htb_delay_by(struct Qdisc *sch, long delay)
{
	struct htb_sched *q = qdisc_priv(sch);
	if (delay <= 0) delay = 1;
	if (delay <= 0)
		delay = 1;
	if (unlikely(delay > 5 * HZ)) {
		if (net_ratelimit())
			printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
@@ -941,13 +966,15 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
	q->jiffies = jiffies;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
		sch->flags &= ~TCQ_F_THROTTLED;
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen) goto fin;
	if (!sch->q.qlen)
		goto fin;
	PSCHED_GET_TIME(q->now);

	min_delay = LONG_MAX;
@@ -958,7 +985,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
		long delay;
		if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
			delay = htb_do_events(q, level);
			q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
			q->near_ev_cache[level] =
			    q->jiffies + (delay ? delay : HZ);
		} else
			delay = q->near_ev_cache[level] - q->jiffies;

@@ -1015,7 +1043,8 @@ static void htb_reset(struct Qdisc* sch)
	for (i = 0; i < HTB_HSIZE; i++) {
		struct list_head *p;
		list_for_each(p, q->hash + i) {
			struct htb_class *cl = list_entry(p,struct htb_class,hlist);
			struct htb_class *cl =
			    list_entry(p, struct htb_class, hlist);
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
@@ -1054,7 +1083,8 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
	}
	gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
	if (gopt->version != HTB_VER >> 16) {
		printk(KERN_ERR "HTB: need tc/htb version %d (minor is %d), you have %d\n",
		printk(KERN_ERR
		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
		return -EINVAL;
	}
@@ -1132,9 +1162,12 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,

	memset(&opt, 0, sizeof(opt));

	opt.rate = cl->rate->rate; opt.buffer = cl->buffer;
	opt.ceil = cl->ceil->rate; opt.cbuffer = cl->cbuffer;
	opt.quantum = cl->un.leaf.quantum; opt.prio = cl->un.leaf.prio;
	opt.rate = cl->rate->rate;
	opt.buffer = cl->buffer;
	opt.ceil = cl->ceil->rate;
	opt.cbuffer = cl->cbuffer;
	opt.quantum = cl->un.leaf.quantum;
	opt.prio = cl->un.leaf.prio;
	opt.level = cl->level;
	RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb->tail - b;
@@ -1147,8 +1180,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg,
	struct gnet_dump *d)
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;

@@ -1177,7 +1209,8 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,

	if (cl && !cl->level) {
		if (new == NULL && (new = qdisc_create_dflt(sch->dev,
					&pfifo_qdisc_ops)) == NULL)
							    &pfifo_qdisc_ops))
		    == NULL)
			return -ENOBUFS;
		sch_tree_lock(sch);
		if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
@@ -1304,7 +1337,8 @@ static void htb_put(struct Qdisc *sch, unsigned long arg)
}

static int htb_change_class(struct Qdisc *sch, u32 classid,
		u32 parentid, struct rtattr **tca, unsigned long *arg)
			    u32 parentid, struct rtattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
@@ -1326,12 +1360,14 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,

	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
	if (!rtab || !ctab) goto failure;
	if (!rtab || !ctab)
		goto failure;

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch))
		if (!classid || TC_H_MAJ(classid ^ sch->handle)
		    || htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
@@ -1373,7 +1409,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->classid = classid; cl->parent = parent;
		cl->classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
@@ -1384,19 +1421,25 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,

		/* attach to the hash list and parent's family */
		list_add_tail(&cl->hlist, q->hash + htb_hash(classid));
		list_add_tail(&cl->sibling, parent ? &parent->children : &q->root);
	} else sch_tree_lock(sch);
		list_add_tail(&cl->sibling,
			      parent ? &parent->children : &q->root);
	} else
		sch_tree_lock(sch);

	/* it used to be a nasty bug here, we have to check that node
	   is really leaf before changing cl->un.leaf ! */
	if (!cl->level) {
		cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
			printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
			printk(KERN_WARNING
			       "HTB: quantum of class %X is small. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 1000;
		}
		if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
			printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
			printk(KERN_WARNING
			       "HTB: quantum of class %X is big. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 200000;
		}
		if (hopt->quantum)
@@ -1407,16 +1450,22 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,

	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate) qdisc_put_rtab(cl->rate); cl->rate = rtab;
	if (cl->ceil) qdisc_put_rtab(cl->ceil); cl->ceil = ctab;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab) qdisc_put_rtab(rtab);
	if (ctab) qdisc_put_rtab(ctab);
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}

@@ -1473,7 +1522,8 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
	for (i = 0; i < HTB_HSIZE; i++) {
		struct list_head *p;
		list_for_each(p, q->hash + i) {
			struct htb_class *cl = list_entry(p,struct htb_class,hlist);
			struct htb_class *cl =
			    list_entry(p, struct htb_class, hlist);
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
@@ -1527,6 +1577,7 @@ static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");