/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *	Stef Coene <stef.coene@docum.org>
 *		HTB support at LARTC mailing list
 *	Ondrej Kraus, <krauso@barr.cz>
 *		found missing INIT_QDISC(htb)
 *	Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *		helped a lot to locate nasty class stall bug
 *	Andi Kleen, Jamal Hadi, Bert Hubert
 *		code review and helpful comments on shaping
 *	Tomasz Wrona, <tw@eter.tym.pl>
 *		created test case so that I was able to fix nasty bug
 *	Wilfried Weissmann
 *		spotted bug in dequeue code and helped with fix
 *	Jiri Fojtasek
 *		fixed requeue routine
 *	and many others. thanks.
 *
 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <net/pkt_sched.h>
#include <linux/rbtree.h>
/*
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf ALWAYS has level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
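/* Editor's illustration (not from the original source), assuming
   TC_HTB_MAXDEPTH is 8 as in pkt_sched.h of this era: a root class sits
   at level 7, an inner class created directly under it at level 6, and
   every leaf at level 0. The numbering is top-down from TC_HTB_MAXDEPTH-1
   while leaves are pinned at 0, so levels need not be contiguous between
   an inner node and its leaves. Only leaves hold packets; inner classes
   exist to lend and borrow tokens. */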
#define HTB_HSIZE 16		/* classid hash size */
#define HTB_EWMAC 2		/* rate average over HTB_EWMAC*HTB_HSIZE sec */
#define HTB_RATECM 1		/* whether to use rate computer */
#define HTB_HYSTERESIS 1	/* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif
/* used internally to keep the status of a single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};
/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
	/* general class parameters */
	u32 classid;
	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_htb_xstats xstats;	/* our special stats */
	int refcnt;		/* usage count of this class */

#ifdef HTB_RATECM
	/* rate measurement counters */
	unsigned long rate_bytes, sum_bytes;
	unsigned long rate_packets, sum_packets;
#endif

	/* topology */
	int level;		/* our level (see above) */
	struct htb_class *parent;	/* parent class */
	struct list_head hlist;	/* classid hash list item */
	struct list_head sibling;	/* sibling list item */
	struct list_head children;	/* children list */

	union {
		struct htb_class_leaf {
			struct Qdisc *q;
			int prio;
			int aprio;
			int quantum;
			int deficit[TC_HTB_MAXDEPTH];
			struct list_head drop_list;
		} leaf;
		struct htb_class_inner {
			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
			/* When a class changes from state 1->2 and disconnects
			   from its parent's feed, we lose the ptr value and
			   have to start from the first child again. Here we
			   store the classid of the last valid ptr (used when
			   ptr is NULL). */
			u32 last_ptr_id[TC_HTB_NUMPRIO];
		} inner;
	} un;
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
	struct rb_node pq_node;	/* node for event queue */
	unsigned long pq_key;	/* the same type as the jiffies global */

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */

	/* class attached filters */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int warned;		/* only one warning about a non-work-conserving class .. */

	/* token bucket parameters */
	struct qdisc_rate_table *rate;	/* rate table of the class itself */
	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
	long buffer, cbuffer;	/* token bucket depth/rate */
	psched_tdiff_t mbuffer;	/* max wait time */
	long tokens, ctokens;	/* current number of tokens */
	psched_time_t t_c;	/* checkpoint time */
};
/* TODO: maybe compute rate when size is too large .. or drop ? */
static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
		       int size)
{
	int slot = size >> rate->rate.cell_log;
	if (slot > 255) {
		cl->xstats.giants++;
		slot = 255;
	}
	return rate->data[slot];
}
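/* Worked example (editor's illustration, numbers assumed): with
   cell_log == 3, a 1500 byte packet maps to slot 1500 >> 3 == 187, and
   rate->data[187] holds the time (in PSCHED units) needed to transmit
   that many cells at the class rate. The table itself is built by the
   tc tool in user space and handed down via netlink. */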
struct htb_sched {
	struct list_head root;	/* root classes list */
	struct list_head hash[HTB_HSIZE];	/* hashed by classid */
	struct list_head drops[TC_HTB_NUMPRIO];	/* active leaves (for drops) */

	/* self list - roots of self generating tree */
	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	int row_mask[TC_HTB_MAXDEPTH];
	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

	/* self wait list - roots of wait PQs per row */
	struct rb_root wait_pq[TC_HTB_MAXDEPTH];

	/* time of nearest event per level (row) */
	unsigned long near_ev_cache[TC_HTB_MAXDEPTH];

	/* cached value of jiffies in dequeue */
	unsigned long jiffies;

	/* whether we hit a non-work-conserving class during this dequeue;
	   used to suppress the mindelay complaint in dequeue */
	int nwc_hit;

	int defcls;		/* class where unclassified flows go to */

	/* filters for qdisc itself */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int rate2quantum;	/* quant = rate / rate2quantum */
	psched_time_t now;	/* cached dequeue time */
	struct timer_list timer;	/* send delay timer */
#ifdef HTB_RATECM
	struct timer_list rttim;	/* rate computer timer */
	int recmp_bucket;	/* which hash bucket to recompute next */
#endif

	/* non-shaped skbs; let them go directly thru */
	struct sk_buff_head direct_queue;
	int direct_qlen;	/* max qlen of above */

	long direct_pkts;
};
/* compute hash of size HTB_HSIZE for given handle */
static inline int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
#error "Declare new hash for your HTB_HSIZE"
#endif
	h ^= h >> 8;		/* stolen from cbq_hash */
	h ^= h >> 4;
	return h & 0xf;
}
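/* Worked example (editor's illustration): classid 1:10 is the handle
   0x0001000A; folding in the >> 8 gives 0x0001000A ^ 0x00000100 ==
   0x0001010A, folding in the >> 4 gives 0x0001010A ^ 0x00010010 ==
   0x0000011A, and masking with 0xf selects bucket 0xA == 10. */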
/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct list_head *p;

	if (TC_H_MAJ(handle) != sch->handle)
		return NULL;

	list_for_each(p, q->hash + htb_hash(handle)) {
		struct htb_class *cl = list_entry(p, struct htb_class, hlist);
		if (cl->classid == handle)
			return cl;
	}
	return NULL;
}
/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 (HTB_DIRECT) if
 * the packet should be passed directly thru. In all other cases a leaf
 * class is returned. We allow direct class selection by classid in
 * skb->priority. Then we examine filters in the qdisc and in inner nodes
 * (if a higher-level filter points to an inner node). If we end up with
 * a classid of MAJOR:0 we enqueue the skb into the special internal fifo
 * (direct). These packets then go directly thru. If we still have no
 * valid leaf we try to use the MAJOR:default leaf. If that is still
 * unsuccessful, we finish and return the direct queue.
 */
#define HTB_DIRECT ((struct htb_class *)-1)
static inline u32 htb_classid(struct htb_class *cl)
{
	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow selecting a class by setting skb->priority to a valid classid;
	   note that nfmark can be used too by attaching filter fw with no
	   classes attached */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
		return cl;

	*qerr = NET_XMIT_BYPASS;
	tcf = q->filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
		case TC_ACT_SHOT:
			return NULL;
		}
#elif defined(CONFIG_NET_CLS_POLICE)
		if (result == TC_POLICE_SHOT)
			return HTB_DIRECT;
#endif
		if ((cl = (void *)res.class) == NULL) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			if ((cl = htb_find(res.classid, sch)) == NULL)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got an inner class; apply inner filter chain */
		tcf = cl->filter_list;
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}
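/* Classification precedence in short: skb->priority naming X:0 selects the
   direct queue; skb->priority naming a leaf selects that leaf; otherwise
   the filter chains decide; otherwise X:defcls; otherwise direct.
   Illustrative user-space setup that exercises the filter path (editor's
   example; device and numbers are made up):

     tc qdisc add dev eth0 root handle 1: htb default 20
     tc class add dev eth0 parent 1: classid 1:10 htb rate 1mbit
     tc class add dev eth0 parent 1: classid 1:20 htb rate 1mbit
     tc filter add dev eth0 parent 1: protocol ip u32 \
         match ip dport 22 0xffff flowid 1:10
*/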
/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Adds the class to the list (actually a tree) sorted by classid.
 * Make sure the class is not already on such a list for the given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->classid > c->classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}
/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to the priority event queue to indicate that the
 * class will change its mode at time cl->pq_key (in jiffies). Make sure
 * that the class is not already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, long delay)
{
	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

	cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
	if (cl->pq_key == q->jiffies)
		cl->pq_key++;

	/* update the nearest event cache */
	if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (time_after_eq(cl->pq_key, c->pq_key))
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}
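/* Example (editor's illustration, numbers assumed): if the class may next
   send 50 ms from now and HZ == 100, PSCHED_US2JIFFIE converts the
   PSCHED-unit delay to 5 jiffies and pq_key becomes q->jiffies + 5; the
   pq_key++ nudge above keeps a zero delay from scheduling an event for
   the current tick. */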
/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past the last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}
/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to the row at the priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
	}
}
/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from the row at the priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;

	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		if (q->ptr[cl->level][prio] == cl->node + prio)
			htb_next_rb_node(q->ptr[cl->level] + prio);
		rb_erase(cl->node + prio, q->row[cl->level] + prio);
		if (!q->row[cl->level][prio].rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}
/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for the priorities it is participating in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use, so
				   reset the bit in mask; the parent is
				   already ok */
				mask &= ~(1 << prio);
			else
				htb_add_to_id_tree(p->un.inner.feed + prio,
						   cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}
/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent the old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. The class is removed from all feed
 * chains.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.ptr[prio] == cl->node + prio) {
				/* we are removing a child which is pointed
				   to from the parent feed - forget the
				   pointer but remember the classid */
				p->un.inner.last_ptr_id[prio] = cl->classid;
				p->un.inner.ptr[prio] = NULL;
			}

			rb_erase(cl->node + prio, p->un.inner.feed + prio);

			if (!p->un.inner.feed[prio].rb_node)
				mask |= 1 << prio;
		}
		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}
#if HTB_HYSTERESIS
static inline long htb_lowater(const struct htb_class *cl)
{
	return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
	return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
}
#else
#define htb_lowater(cl)	(0)
#define htb_hiwater(cl)	(0)
#endif
/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If the mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to the time difference
 * from now to the time when cl will change its state.
 * It is also worth noting that the class mode doesn't change simply
 * at cl->{c,}tokens == 0; rather there can be hysteresis over the
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
	long toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
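/* Worked example of the hysteresis (editor's illustration, numbers
   assumed): take a class with buffer == 6000 that is currently
   HTB_CAN_SEND. htb_hiwater() is then -6000, so the class keeps
   HTB_CAN_SEND until tokens would fall below -6000; once demoted, hiwater
   becomes 0, so it must climb back to tokens >= 0 before being promoted
   again. That 0 .. -buffer dead band is what limits mode flapping. */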
/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. The routine will update the feed list linkage, change
 * the mode and add the class to the wait event queue if appropriate.
 * The new mode should be different from the old one and cl->pq_key has
 * to be valid if changing to a mode other than HTB_CAN_SEND
 * (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}
/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns the (new) priority of the leaf and activates the feed
 * chain for that prio. It can safely be called on an already active leaf.
 * It also adds the leaf into the droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->un.leaf.aprio);
	}
}
/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that the leaf is active. In other words it can't be called
 * with a non-active leaf. It also removes the class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int ret;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			kfree_skb(skb);
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
#endif
	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else {
		cl->bstats.packets++;
		cl->bstats.bytes += skb->len;
		htb_activate(q, cl);
	}

	sch->q.qlen++;
	sch->bstats.packets++;
	sch->bstats.bytes += skb->len;
	return NET_XMIT_SUCCESS;
}
/* TODO: requeuing a packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int ret = NET_XMIT_SUCCESS;
	struct htb_class *cl = htb_classify(skb, sch, &ret);
	struct sk_buff *tskb;

	if (cl == HTB_DIRECT || !cl) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen && cl) {
			__skb_queue_head(&q->direct_queue, skb);
		} else {
			__skb_queue_head(&q->direct_queue, skb);
			tskb = __skb_dequeue_tail(&q->direct_queue);
			kfree_skb(tskb);
			sch->qstats.drops++;
			return NET_XMIT_CN;
		}
	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else
		htb_activate(q, cl);

	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}
static void htb_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	sch->flags &= ~TCQ_F_THROTTLED;
	wmb();
	netif_schedule(sch->dev);
}
#ifdef HTB_RATECM
#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
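/* RT_GEN is a cheap exponentially weighted moving average: each tick it
   folds the per-interval counter D into the running rate R as
   R += D - R/HTB_EWMAC, then clears D. Illustrative numbers (editor's
   example): with HTB_EWMAC == 2, R == 1000 and D == 500, the new R is
   1000 + 500 - 500 == 1000, i.e. roughly 1/HTB_EWMAC of the old average
   is replaced per step. */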
static void htb_rate_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct htb_sched *q = qdisc_priv(sch);
	struct list_head *p;

	/* lock queue so that we can muck with it */
	spin_lock_bh(&sch->dev->queue_lock);

	q->rttim.expires = jiffies + HZ;
	add_timer(&q->rttim);

	/* scan and recompute one bucket at a time */
	if (++q->recmp_bucket >= HTB_HSIZE)
		q->recmp_bucket = 0;

	list_for_each(p, q->hash + q->recmp_bucket) {
		struct htb_class *cl = list_entry(p, struct htb_class, hlist);

		RT_GEN(cl->sum_bytes, cl->rate_bytes);
		RT_GEN(cl->sum_packets, cl->rate_packets);
	}
	spin_unlock_bh(&sch->dev->queue_lock);
}
#endif
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that a packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to the ceil leaky bucket for
 * the leaf and all ancestors and to the rate bucket for ancestors at
 * levels "level" and higher. It also handles possible changes of mode
 * resulting from the update. Note that the mode can also increase here
 * (MAY_BORROW to CAN_SEND) because we can use a more precise clock than
 * the event queue here. In that case we remove the class from the event
 * queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, int bytes)
{
	long toks, diff;
	enum htb_cmode old_mode;
#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
	if (toks > cl->B) toks = cl->B; \
	toks -= L2T(cl, cl->R, bytes); \
	if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
	cl->T = toks
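	/* HTB_ACCNT in words (editor's note): refill the bucket with the
	   tokens earned over diff (clamped at the bucket size B), then pay
	   L2T() tokens for the bytes just sent, never letting the deficit
	   exceed mbuffer. Illustrative numbers (assumed): with diff == 5000,
	   tokens == 2000 and buffer == 6000, the refill 5000 + 2000 == 7000
	   is clamped to 6000; a packet whose L2T() cost is 1500 then leaves
	   tokens == 4500. */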
	while (cl) {
		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			HTB_ACCNT(tokens, buffer, rate);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		HTB_ACCNT(ctokens, cbuffer, ceil);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				rb_erase(&cl->pq_node, q->wait_pq + cl->level);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}
#ifdef HTB_RATECM
		/* update rate counters */
		cl->sum_bytes += bytes;
		cl->sum_packets++;
#endif

		/* update byte stats except for leaves which are already updated */
		if (cl->level) {
			cl->bstats.bytes += bytes;
			cl->bstats.packets++;
		}
		cl = cl->parent;
	}
}
/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans the event queue for pending events and applies them. Returns
 * jiffies to the next pending event (0 if there is no event in the pq).
 * Note: applied are events with cl->pq_key <= q->jiffies.
 */
static long htb_do_events(struct htb_sched *q, int level)
{
	int i;

	for (i = 0; i < 500; i++) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = q->wait_pq[level].rb_node;
		if (!p)
			return 0;
		while (p->rb_left)
			p = p->rb_left;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (time_after(cl->pq_key, q->jiffies)) {
			return cl->pq_key - q->jiffies;
		}
		rb_erase(p, q->wait_pq + level);
		diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}
	if (net_ratelimit())
		printk(KERN_WARNING "htb: too many events !\n");
	return HZ / 10;
}
/* Returns class->node+prio from the id-tree where the class's id is >= id.
   NULL if no such one exists. */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
		    rb_entry(n, struct htb_class, node[prio]);
		if (id == cl->classid)
			return n;

		if (id > cl->classid) {
			n = n->rb_right;
		} else {
			r = n;
			n = n->rb_left;
		}
	}
	return r;
}
/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 *pid)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_TRAP(tree->rb_node);
	sp->root = tree->rb_node;
	sp->pptr = pptr;
	sp->pid = pid;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but the id is valid - try to
			   recover the original or next ptr */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now, so drop this hint as it
				   can become out of date quickly */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				BUG_TRAP(*sp->pptr);
				if (!*sp->pptr)
					return NULL;
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			(++sp)->root = cl->un.inner.feed[prio].rb_node;
			sp->pptr = cl->un.inner.ptr + prio;
			sp->pid = cl->un.inner.last_ptr_id + prio;
		}
	}
	BUG_TRAP(0);
	return NULL;
}
/* dequeues packet at given priority and level; call only if
   you are sure that there is an active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
					int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	do {
next:
		BUG_TRAP(cl);
		if (!cl)
			return NULL;

		/* class can be empty - it is unlikely but can be true if the
		   leaf qdisc drops packets in its enqueue routine or if
		   someone used the graft operation on the leaf since the
		   last dequeue; simply deactivate and skip such a class */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(q->row[level] + prio,
					       prio, q->ptr[level] + prio,
					       q->last_ptr_id[level] + prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;
		if (!cl->warned) {
			printk(KERN_WARNING
			       "htb: class %X isn't work conserving ?!\n",
			       cl->classid);
			cl->warned = 1;
		}
		q->nwc_hit++;
		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
				  ptr[0]) + prio);
		cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	} while (cl != start);
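	/* What follows is classic deficit round robin (editor's summary):
	   each dequeued packet is charged against the leaf's per-level
	   deficit, and only when the deficit goes negative is it topped up
	   by one quantum and the feed pointer advanced, so a leaf keeps the
	   turn until it has sent roughly quantum bytes. */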
	if (likely(skb != NULL)) {
		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
					  ptr[0]) + prio);
		}
		/* this used to be after charge_class but this constellation
		   gives us slightly better performance */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb->len);
	}
	return skb;
}
static void htb_delay_by(struct Qdisc *sch, long delay)
{
	struct htb_sched *q = qdisc_priv(sch);
	if (delay <= 0)
		delay = 1;
	if (unlikely(delay > 5 * HZ)) {
		if (net_ratelimit())
			printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
		delay = 5 * HZ;
	}
	/* why not use jiffies here? because expires can be in the past */
	mod_timer(&q->timer, q->jiffies + delay);
	sch->flags |= TCQ_F_THROTTLED;
	sch->qstats.overlimits++;
}
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = NULL;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	long min_delay;

	q->jiffies = jiffies;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
		sch->flags &= ~TCQ_F_THROTTLED;
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	PSCHED_GET_TIME(q->now);

	min_delay = LONG_MAX;
	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		long delay;
		if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
			delay = htb_do_events(q, level);
			q->near_ev_cache[level] =
			    q->jiffies + (delay ? delay : HZ);
		} else
			delay = q->near_ev_cache[level] - q->jiffies;

		if (delay && min_delay > delay)
			min_delay = delay;
		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);
			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL)) {
				sch->q.qlen--;
				sch->flags &= ~TCQ_F_THROTTLED;
				goto fin;
			}
		}
	}
	htb_delay_by(sch, min_delay > 5 * HZ ? 5 * HZ : min_delay);
fin:
	return skb;
}
/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int prio;

	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
		struct list_head *p;
		list_for_each(p, q->drops + prio) {
			struct htb_class *cl = list_entry(p, struct htb_class,
							  un.leaf.drop_list);
			unsigned int len;
			if (cl->un.leaf.q->ops->drop &&
			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
				sch->q.qlen--;
				if (!cl->un.leaf.q->q.qlen)
					htb_deactivate(q, cl);
				return len;
			}
		}
	}
	return 0;
}
/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct list_head *p;
		list_for_each(p, q->hash + i) {
			struct htb_class *cl =
			    list_entry(p, struct htb_class, hlist);
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer(&q->timer);
	__skb_queue_purge(&q->direct_queue);
	sch->q.qlen = 0;
	memset(q->row, 0, sizeof(q->row));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	memset(q->wait_pq, 0, sizeof(q->wait_pq));
	memset(q->ptr, 0, sizeof(q->ptr));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}
static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_HTB_INIT];
	struct tc_htb_glob *gopt;
	int i;
	if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
	    tb[TCA_HTB_INIT - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) {
		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
		return -EINVAL;
	}
	gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
	if (gopt->version != HTB_VER >> 16) {
		printk(KERN_ERR
		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&q->root);
	for (i = 0; i < HTB_HSIZE; i++)
		INIT_LIST_HEAD(q->hash + i);
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	init_timer(&q->timer);
	skb_queue_head_init(&q->direct_queue);

	q->direct_qlen = sch->dev->tx_queue_len;
	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
		q->direct_qlen = 2;
	q->timer.function = htb_timer;
	q->timer.data = (unsigned long)sch;

#ifdef HTB_RATECM
	init_timer(&q->rttim);
	q->rttim.function = htb_rate_timer;
	q->rttim.data = (unsigned long)sch;
	q->rttim.expires = jiffies + HZ;
	add_timer(&q->rttim);
#endif
	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}
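/* Illustrative user-space counterpart (editor's example; the device and
   numbers are made up): the tc tool packs a struct tc_htb_glob into the
   TCA_HTB_INIT attribute, which is what htb_init parses above:

     tc qdisc add dev eth0 root handle 1: htb default 10 r2q 10

   "default 10" arrives as gopt->defcls and "r2q 10" as
   gopt->rate2quantum. */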
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_htb_glob gopt;
	spin_lock_bh(&sch->dev->queue_lock);
	gopt.direct_pkts = q->direct_pkts;

	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;
	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
	rta->rta_len = skb->tail - b;
	spin_unlock_bh(&sch->dev->queue_lock);
	return skb->len;

rtattr_failure:
	spin_unlock_bh(&sch->dev->queue_lock);
	skb_trim(skb, skb->tail - skb->data);
	return -1;
}
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_htb_opt opt;

	spin_lock_bh(&sch->dev->queue_lock);
	tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
	if (!cl->level && cl->un.leaf.q)
		tcm->tcm_info = cl->un.leaf.q->handle;

	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	memset(&opt, 0, sizeof(opt));

	opt.rate = cl->rate->rate;
	opt.buffer = cl->buffer;
	opt.ceil = cl->ceil->rate;
	opt.cbuffer = cl->cbuffer;
	opt.quantum = cl->un.leaf.quantum;
	opt.prio = cl->un.leaf.prio;
	opt.level = cl->level;
	RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb->tail - b;
	spin_unlock_bh(&sch->dev->queue_lock);
	return skb->len;

rtattr_failure:
	spin_unlock_bh(&sch->dev->queue_lock);
	skb_trim(skb, b - skb->data);
	return -1;
}
static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;
#ifdef HTB_RATECM
	cl->rate_est.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE);
	cl->rate_est.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE);
#endif

	if (!cl->level && cl->un.leaf.q)
		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
	cl->xstats.tokens = cl->tokens;
	cl->xstats.ctokens = cl->ctokens;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl && !cl->level) {
		if (new == NULL && (new = qdisc_create_dflt(sch->dev,
							    &pfifo_qdisc_ops))
		    == NULL)
			return -ENOBUFS;
		sch_tree_lock(sch);
		if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
			if (cl->prio_activity)
				htb_deactivate(qdisc_priv(sch), cl);

			/* TODO: is it correct? Why doesn't CBQ do it? */
			sch->q.qlen -= (*old)->q.qlen;
			qdisc_reset(*old);
		}
		sch_tree_unlock(sch);
		return 0;
	}
	return -ENOENT;
}
static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}
static void htb_destroy_filters(struct tcf_proto **fl)
{
	struct tcf_proto *tp;

	while ((tp = *fl) != NULL) {
		*fl = tp->next;
		tcf_destroy(tp);
	}
}
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	struct htb_sched *q = qdisc_priv(sch);

	if (!cl->level) {
		BUG_TRAP(cl->un.leaf.q);
		sch->q.qlen -= cl->un.leaf.q->q.qlen;
		qdisc_destroy(cl->un.leaf.q);
	}
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

	htb_destroy_filters(&cl->filter_list);

	while (!list_empty(&cl->children))
		htb_destroy_class(sch, list_entry(cl->children.next,
						  struct htb_class, sibling));

	/* note: this delete may happen twice (see htb_delete) */
	list_del(&cl->hlist);
	list_del(&cl->sibling);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	kfree(cl);
}
/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);

	del_timer_sync(&q->timer);
#ifdef HTB_RATECM
	del_timer_sync(&q->rttim);
#endif
	/* This line used to be after the htb_destroy_class call below
	   and surprisingly it worked in 2.4. But it must precede it
	   because filters need their target class alive to be able to call
	   unbind_filter on it (without an Oops). */
	htb_destroy_filters(&q->filter_list);

	while (!list_empty(&q->root))
		htb_destroy_class(sch, list_entry(q->root.next,
						  struct htb_class, sibling));

	__skb_queue_purge(&q->direct_queue);
}
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	// TODO: why not allow deleting a subtree? references? does the
	// tc subsystem guarantee that htb_destroy holds no class refs,
	// so that we can remove children safely there?
	if (!list_empty(&cl->children) || cl->filter_cnt)
		return -EBUSY;

	sch_tree_lock(sch);

	/* delete from hash and active; the remainder is in destroy_class */
	list_del_init(&cl->hlist);
	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}
static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct rtattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct rtattr *opt = tca[TCA_OPTIONS - 1];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct rtattr *tb[TCA_HTB_RTAB];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from opt attr */
	if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
	    tb[TCA_HTB_PARMS - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt))
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]);

	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
	if (!rtab || !ctab)
		goto failure;

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle)
		    || htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			printk(KERN_ERR "htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
			goto failure;

		cl->refcnt = 1;
		INIT_LIST_HEAD(&cl->sibling);
		INIT_LIST_HEAD(&cl->hlist);
		INIT_LIST_HEAD(&cl->children);
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);

		/* create the leaf qdisc early because it uses
		   kmalloc(GFP_KERNEL), which can't be used inside
		   sch_tree_lock -- thanks to Karlis Peisenieks */
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			/* turn parent into inner node */
			sch->q.qlen -= parent->un.leaf.q->q.qlen;
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				rb_erase(&parent->pq_node, q->wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = PSCHED_JIFFIE2US(HZ * 60);	/* 1min */
		PSCHED_GET_TIME(cl->t_c);
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		list_add_tail(&cl->hlist, q->hash + htb_hash(classid));
		list_add_tail(&cl->sibling,
			      parent ? &parent->children : &q->root);
	} else
		sch_tree_lock(sch);

	/* there used to be a nasty bug here, we have to check that the node
	   is really a leaf before changing cl->un.leaf ! */
	if (!cl->level) {
		cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is small. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 1000;
		}
		if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is big. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 200000;
		}
		if (hopt->quantum)
			cl->un.leaf.quantum = hopt->quantum;
		if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}
static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	   The line above used to be there to prevent attaching filters to
	   leaves. But at least the tc_index filter uses this just to get the
	   class for other reasons, so we have to allow for it.
	   ----
	   19.6.2002 As Werner explained it is ok - bind filter is just
	   another way to "lock" the class - unlike "get" this lock can
	   be broken by class during destroy IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	else
		q->filter_cnt++;
	return (unsigned long)cl;
}
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
	else
		q->filter_cnt--;
}
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct list_head *p;
		list_for_each(p, q->hash + i) {
			struct htb_class *cl =
			    list_entry(p, struct htb_class, hlist);
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static struct Qdisc_class_ops htb_class_ops = {
	.graft = htb_graft,
	.leaf = htb_leaf,
	.get = htb_get,
	.put = htb_put,
	.change = htb_change_class,
	.delete = htb_delete,
	.walk = htb_walk,
	.tcf_chain = htb_find_tcf,
	.bind_tcf = htb_bind_filter,
	.unbind_tcf = htb_unbind_filter,
	.dump = htb_dump_class,
	.dump_stats = htb_dump_class_stats,
};
static struct Qdisc_ops htb_qdisc_ops = {
	.next = NULL,
	.cl_ops = &htb_class_ops,
	.id = "htb",
	.priv_size = sizeof(struct htb_sched),
	.enqueue = htb_enqueue,
	.dequeue = htb_dequeue,
	.requeue = htb_requeue,
	.drop = htb_drop,
	.init = htb_init,
	.reset = htb_reset,
	.destroy = htb_destroy,
	.change = NULL /* htb_change */,
	.dump = htb_dump,
	.owner = THIS_MODULE,
};
static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");