/*
 * IPVS:        Locality-Based Least-Connection with Replication scheduler
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Julian Anastasov        :    Added the missing (dest->weight>0)
 *                                  condition in the ip_vs_dest_set_max.
 *
 */
/*
 * The lblc/r algorithm is as follows (pseudo code):
 *
 *       if serverSet[dest_ip] is null then
 *               n, serverSet[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- {least-conn (alive) node in serverSet[dest_ip]};
 *               if (n is null) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                   n <- {weighted least-conn node};
 *                   add n to serverSet[dest_ip];
 *               if |serverSet[dest_ip]| > 1 AND
 *                   now - serverSet[dest_ip].lastMod > T then
 *                   m <- {most conn node in serverSet[dest_ip]};
 *                   remove m from serverSet[dest_ip];
 *       if serverSet[dest_ip] changed then
 *               serverSet[dest_ip].lastMod <- now;
 *
 *       return n;
 */
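/*
 * In the code below, serverSet[dest_ip] is realized as an
 * ip_vs_lblcr_entry holding an ip_vs_dest_set, and the threshold T
 * corresponds to the lblcr_expiration sysctl, which is checked
 * against set.lastmod in ip_vs_lblcr_schedule().
 */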
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

#include <net/ip_vs.h>
/*
 *    It is for garbage collection of stale IPVS lblcr entries,
 *    when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)
/*
 *    It is for full expiration check.
 *    When there is no partial expiration check (garbage collection)
 *    in a half hour, do a full expiration check to collect stale
 *    entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
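/*
 * Arithmetic check: COUNT_FOR_FULL_EXPIRATION timer runs spaced
 * CHECK_EXPIRE_INTERVAL apart is 30 * 60*HZ, i.e. the half hour
 * mentioned above.
 */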
/*
 *     for IPVS lblcr entry hash table
 */
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS      10
#endif
#define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)
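/*
 * With the default 10 bits this gives 1024 buckets and a mask of
 * 0x3ff, so a hash key reduces to a bucket index with a single AND.
 */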
/*
 *      IPVS destination set structure and operations
 */
struct ip_vs_dest_set_elem {
	struct list_head	list;		/* list link */
	struct ip_vs_dest	*dest;		/* destination server */
};

struct ip_vs_dest_set {
	atomic_t		size;		/* set size */
	unsigned long		lastmod;	/* last modified time */
	struct list_head	list;		/* destination list */
	rwlock_t		lock;		/* lock for this list */
};
static struct ip_vs_dest_set_elem *
ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
	struct ip_vs_dest_set_elem *e;

	list_for_each_entry(e, &set->list, list) {
		if (e->dest == dest)
			/* already existed */
			return NULL;
	}

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL) {
		pr_err("%s(): no memory\n", __func__);
		return NULL;
	}

	atomic_inc(&dest->refcnt);
	e->dest = dest;
	list_add(&e->list, &set->list);
	atomic_inc(&set->size);
	set->lastmod = jiffies;
	return e;
}
static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
	struct ip_vs_dest_set_elem *e;

	list_for_each_entry(e, &set->list, list) {
		if (e->dest == dest) {
			atomic_dec(&set->size);
			set->lastmod = jiffies;
			atomic_dec(&e->dest->refcnt);
			list_del(&e->list);
			kfree(e);
			break;
		}
	}
}
static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
	struct ip_vs_dest_set_elem *e, *ep;

	write_lock(&set->lock);
	list_for_each_entry_safe(e, ep, &set->list, list) {
		/*
		 * We don't kfree dest because it is referred either
		 * by its service or by the trash dest list.
		 */
		atomic_dec(&e->dest->refcnt);
		list_del(&e->list);
		kfree(e);
	}
	write_unlock(&set->lock);
}
/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
	register struct ip_vs_dest_set_elem *e;
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	if (set == NULL)
		return NULL;

	/* select the first destination server, whose weight > 0 */
	list_for_each_entry(e, &set->list, list) {
		least = e->dest;
		if (least->flags & IP_VS_DEST_F_OVERLOAD)
			continue;
		if ((atomic_read(&least->weight) > 0)
		    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
			loh = atomic_read(&least->activeconns) * 50
				+ atomic_read(&least->inactconns);
			goto nextstage;
		}
	}
	return NULL;
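	/*
	 * As in ip_vs_dest_set_max() below, weighted loads are compared
	 * by cross-multiplication: loh/lw > doh/dw <==> loh*dw > doh*lw,
	 * which avoids floating point in kernel code.
	 */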
	/* find the destination with the weighted least load */
  nextstage:
	list_for_each_entry(e, &set->list, list) {
		dest = e->dest;
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;
		doh = atomic_read(&dest->activeconns) * 50
			+ atomic_read(&dest->inactconns);
		if ((loh * atomic_read(&dest->weight) >
		     doh * atomic_read(&least->weight))
		    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "%s(): server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      __func__,
		      IP_VS_DBG_ADDR(least->af, &least->addr),
		      ntohs(least->port),
		      atomic_read(&least->activeconns),
		      atomic_read(&least->refcnt),
		      atomic_read(&least->weight), loh);
	return least;
}
/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
	register struct ip_vs_dest_set_elem *e;
	struct ip_vs_dest *dest, *most;
	int moh, doh;

	if (set == NULL)
		return NULL;

	/* select the first destination server, whose weight > 0 */
	list_for_each_entry(e, &set->list, list) {
		most = e->dest;
		if (atomic_read(&most->weight) > 0) {
			moh = atomic_read(&most->activeconns) * 50
				+ atomic_read(&most->inactconns);
			goto nextstage;
		}
	}
	return NULL;

	/* find the destination with the weighted most load */
  nextstage:
	list_for_each_entry(e, &set->list, list) {
		dest = e->dest;
		doh = atomic_read(&dest->activeconns) * 50
			+ atomic_read(&dest->inactconns);
		/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
		if ((moh * atomic_read(&dest->weight) <
		     doh * atomic_read(&most->weight))
		    && (atomic_read(&dest->weight) > 0)) {
			most = dest;
			moh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "%s(): server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      __func__,
		      IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port),
		      atomic_read(&most->activeconns),
		      atomic_read(&most->refcnt),
		      atomic_read(&most->weight), moh);
	return most;
}
/*
 *      IPVS lblcr entry represents an association between destination
 *      IP address and its destination server set
 */
struct ip_vs_lblcr_entry {
	struct list_head	list;
	int			af;		/* address family */
	union nf_inet_addr	addr;		/* destination IP address */
	struct ip_vs_dest_set	set;		/* destination server set */
	unsigned long		lastuse;	/* last used time */
};
/*
 *      IPVS lblcr hash table
 */
struct ip_vs_lblcr_table {
	struct list_head	bucket[IP_VS_LBLCR_TAB_SIZE];	/* hash bucket */
	atomic_t		entries;	/* number of entries */
	int			max_size;	/* maximum size of entries */
	struct timer_list	periodic_timer;	/* collect stale entries */
	int			rover;		/* rover for expire check */
	int			counter;	/* counter for no expire */
};
/*
 *      IPVS LBLCR sysctl table
 */
static ctl_table vs_vars_table[] = {
	{
		.procname	= "lblcr_expiration",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
	list_del(&en->list);
	ip_vs_dest_set_eraseall(&en->set);
	kfree(en);
}
/*
 *	Returns hash value for IPVS LBLCR entry
 */
static inline unsigned
ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
{
	__be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		addr_fold = addr->ip6[0]^addr->ip6[1]^
			    addr->ip6[2]^addr->ip6[3];
#endif
	return (ntohl(addr_fold)*2654435761UL) & IP_VS_LBLCR_TAB_MASK;
}
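/*
 * The multiplier 2654435761 (0x9e3779b1) is the golden-ratio constant
 * used for multiplicative hashing (roughly 2^32 divided by the golden
 * ratio), which spreads clustered addresses across the buckets.
 */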
/*
 *	Hash an entry in the ip_vs_lblcr_table.
 */
static void
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
	unsigned hash = ip_vs_lblcr_hashkey(en->af, &en->addr);

	list_add(&en->list, &tbl->bucket[hash]);
	atomic_inc(&tbl->entries);
}
/*
 *  Get ip_vs_lblcr_entry associated with supplied parameters. Called under
 *  read lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
		const union nf_inet_addr *addr)
{
	unsigned hash = ip_vs_lblcr_hashkey(af, addr);
	struct ip_vs_lblcr_entry *en;

	list_for_each_entry(en, &tbl->bucket[hash], list)
		if (ip_vs_addr_equal(af, &en->addr, addr))
			return en;
	return NULL;
}
/*
 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
 * IP address to a server. Called under write lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
		struct ip_vs_dest *dest)
{
	struct ip_vs_lblcr_entry *en;

	en = ip_vs_lblcr_get(dest->af, tbl, daddr);
	if (!en) {
		en = kmalloc(sizeof(*en), GFP_ATOMIC);
		if (!en) {
			pr_err("%s(): no memory\n", __func__);
			return NULL;
		}

		en->af = dest->af;
		ip_vs_addr_copy(dest->af, &en->addr, daddr);
		en->lastuse = jiffies;

		/* initialize its dest set */
		atomic_set(&(en->set.size), 0);
		INIT_LIST_HEAD(&en->set.list);
		rwlock_init(&en->set.lock);

		ip_vs_lblcr_hash(tbl, en);
	}

	write_lock(&en->set.lock);
	ip_vs_dest_set_insert(&en->set, dest);
	write_unlock(&en->set.lock);

	return en;
}
/*
 *      Flush all the entries of the specified table.
 */
static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
{
	int i;
	struct ip_vs_lblcr_entry *en, *nxt;

	/* No locking required, only called during cleanup. */
	for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
			ip_vs_lblcr_free(en);
		}
	}
}
static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	unsigned long now = jiffies;
	int i, j;
	struct ip_vs_lblcr_entry *en, *nxt;
	struct netns_ipvs *ipvs = net_ipvs(svc->net);

	for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		write_lock(&svc->sched_lock);
		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
			if (time_after(en->lastuse +
				       ipvs->sysctl_lblcr_expiration, now))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
		}
		write_unlock(&svc->sched_lock);
	}
	tbl->rover = j;
}
/*
 *      Periodical timer handler for IPVS lblcr table
 *      It is used to collect stale entries when the number of entries
 *      exceeds the maximum size of the table.
 *
 *      Fixme: we probably need more complicated algorithm to collect
 *             entries that have not been used for a long time even
 *             if the number of entries doesn't exceed the maximum size
 *             of the table.
 *      The full expiration check is for this purpose now.
 */
static void ip_vs_lblcr_check_expire(unsigned long data)
{
	struct ip_vs_service *svc = (struct ip_vs_service *) data;
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	unsigned long now = jiffies;
	int goal;
	int i, j;
	struct ip_vs_lblcr_entry *en, *nxt;

	if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
		/* do full expiration check */
		ip_vs_lblcr_full_check(svc);
		tbl->counter = 1;
		goto out;
	}

	if (atomic_read(&tbl->entries) <= tbl->max_size) {
		tbl->counter++;
		goto out;
	}

	goal = (atomic_read(&tbl->entries) - tbl->max_size) * 4 / 3;
	if (goal > tbl->max_size/2)
		goal = tbl->max_size/2;
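	/*
	 * The sweep target is a heuristic: reclaim a third more than the
	 * current excess over max_size, but never more than half the
	 * table, so a single timer run cannot empty a busy table.
	 */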
	for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		write_lock(&svc->sched_lock);
		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
			if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
			goal--;
		}
		write_unlock(&svc->sched_lock);
		if (goal <= 0)
			break;
	}
	tbl->rover = j;

  out:
	mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
}
static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
	int i;
	struct ip_vs_lblcr_table *tbl;

	/* Allocate the ip_vs_lblcr_table for this service */
	tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
	if (tbl == NULL) {
		pr_err("%s(): no memory\n", __func__);
		return -ENOMEM;
	}
	svc->sched_data = tbl;
	IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
		  "current service\n", sizeof(*tbl));

	/* Initialize the hash buckets */
	for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++)
		INIT_LIST_HEAD(&tbl->bucket[i]);
	tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
	tbl->rover = 0;
	tbl->counter = 1;
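	/*
	 * With the default 1024 buckets this caps the table at 16384
	 * entries, an average chain depth of 16, before the periodic
	 * timer starts reclaiming entries.
	 */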
	/* Hook periodic timer for garbage collection */
	setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire,
		    (unsigned long)svc);
	mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

	return 0;
}
static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;

	/* remove periodic timer */
	del_timer_sync(&tbl->periodic_timer);

	/* got to clean up table entries here */
	ip_vs_lblcr_flush(tbl);

	/* release the table itself */
	kfree(tbl);
	IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
		  sizeof(*tbl));

	return 0;
}
static inline struct ip_vs_dest *
__ip_vs_lblcr_schedule(struct ip_vs_service *svc)
{
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	/*
	 * We think the overhead of processing active connections is fifty
	 * times higher than that of inactive connections in average. (This
	 * fifty times might not be accurate, we will change it later.) We
	 * use the following formula to estimate the overhead:
	 *                dest->activeconns*50 + dest->inactconns
	 * and the load:
	 *                (dest overhead) / dest->weight
	 *
	 * Remember -- no floats in kernel mode!!!
	 * The comparison of h1*w2 > h2*w1 is equivalent to that of
	 * h1/w1 > h2/w2
	 * if every weight is larger than zero.
	 *
	 * The server with weight=0 is quiesced and will not receive any
	 * new connection.
	 */
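	/*
	 * Worked example (illustrative numbers only): server A with
	 * weight 3, 10 active and 20 inactive connections has overhead
	 * 10*50 + 20 = 520; server B with weight 1, 2 active and 5
	 * inactive has overhead 105. Cross-multiplying, 520*1 > 105*3,
	 * so B carries the lower weighted load and is preferred.
	 */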
	list_for_each_entry(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;
		if (atomic_read(&dest->weight) > 0) {
			least = dest;
			loh = atomic_read(&least->activeconns) * 50
				+ atomic_read(&least->inactconns);
			goto nextstage;
		}
	}
	return NULL;

	/*
	 *    Find the destination with the least load.
	 */
  nextstage:
	list_for_each_entry_continue(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;
		doh = atomic_read(&dest->activeconns) * 50
			+ atomic_read(&dest->inactconns);
		if (loh * atomic_read(&dest->weight) >
		    doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "LBLCR: server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      IP_VS_DBG_ADDR(least->af, &least->addr),
		      ntohs(least->port),
		      atomic_read(&least->activeconns),
		      atomic_read(&least->refcnt),
		      atomic_read(&least->weight), loh);

	return least;
}
/*
 *   If this destination server is overloaded and there is a less loaded
 *   server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
	if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
		struct ip_vs_dest *d;

		list_for_each_entry(d, &svc->destinations, n_list) {
			if (atomic_read(&d->activeconns)*2
			    < atomic_read(&d->weight)) {
				return 1;
			}
		}
	}
	return 0;
}
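/*
 * This is the replication trigger from the pseudo code above:
 * n.conns > n.weight together with some node m where
 * m.conns < m.weight/2 makes the scheduler pull a fresh weighted
 * least-connection server into the destination set.
 */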
/*
 *   Locality-Based (weighted) Least-Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	struct ip_vs_iphdr iph;
	struct ip_vs_dest *dest = NULL;
	struct ip_vs_lblcr_entry *en;

	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);

	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

	/* First look in our cache */
	read_lock(&svc->sched_lock);
	en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr);
	if (en) {
		struct netns_ipvs *ipvs = net_ipvs(svc->net);
		/* We only hold a read lock, but this is atomic */
		en->lastuse = jiffies;

		/* Get the least loaded destination */
		read_lock(&en->set.lock);
		dest = ip_vs_dest_set_min(&en->set);
		read_unlock(&en->set.lock);

		/* More than one destination + enough time passed by, cleanup */
		if (atomic_read(&en->set.size) > 1 &&
		    time_after(jiffies, en->set.lastmod +
			       ipvs->sysctl_lblcr_expiration)) {
			struct ip_vs_dest *m;

			write_lock(&en->set.lock);
			m = ip_vs_dest_set_max(&en->set);
			if (m)
				ip_vs_dest_set_erase(&en->set, m);
			write_unlock(&en->set.lock);
		}

		/* If the destination is not overloaded, use it */
		if (dest && !is_overloaded(dest, svc)) {
			read_unlock(&svc->sched_lock);
			goto out;
		}

		/* The cache entry is invalid, time to schedule */
		dest = __ip_vs_lblcr_schedule(svc);
		if (!dest) {
			IP_VS_ERR_RL("LBLCR: no destination available\n");
			read_unlock(&svc->sched_lock);
			return NULL;
		}

		/* Update our cache entry */
		write_lock(&en->set.lock);
		ip_vs_dest_set_insert(&en->set, dest);
		write_unlock(&en->set.lock);
	}
	read_unlock(&svc->sched_lock);

	if (dest)
		goto out;

	/* No cache entry, time to schedule */
	dest = __ip_vs_lblcr_schedule(svc);
	if (!dest) {
		IP_VS_DBG(1, "no destination available\n");
		return NULL;
	}

	/* If we fail to create a cache entry, we'll just use the valid dest */
	write_lock(&svc->sched_lock);
	ip_vs_lblcr_new(tbl, &iph.daddr, dest);
	write_unlock(&svc->sched_lock);

  out:
	IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
		      IP_VS_DBG_ADDR(svc->af, &iph.daddr),
		      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));

	return dest;
}
/*
 *      IPVS LBLCR Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
	.name =			"lblcr",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
	.n_list =		LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
	.init_service =		ip_vs_lblcr_init_svc,
	.done_service =		ip_vs_lblcr_done_svc,
	.schedule =		ip_vs_lblcr_schedule,
};
static int __net_init __ip_vs_lblcr_init(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	if (!net_eq(net, &init_net)) {
		ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
						sizeof(vs_vars_table),
						GFP_KERNEL);
		if (ipvs->lblcr_ctl_table == NULL)
			return -ENOMEM;
	} else
		ipvs->lblcr_ctl_table = vs_vars_table;
	ipvs->sysctl_lblcr_expiration = 24*60*60*HZ;
	ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
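	/*
	 * The default lblcr_expiration of 24*60*60*HZ jiffies is the
	 * "stale for a day" horizon from the expiration comments above;
	 * proc_dointvec_jiffies exposes the value in seconds via sysctl.
	 */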
	ipvs->lblcr_ctl_header =
		register_net_sysctl_table(net, net_vs_ctl_path,
					  ipvs->lblcr_ctl_table);
	if (!ipvs->lblcr_ctl_header) {
		if (!net_eq(net, &init_net))
			kfree(ipvs->lblcr_ctl_table);
		return -ENOMEM;
	}

	return 0;
}
static void __net_exit __ip_vs_lblcr_exit(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	unregister_net_sysctl_table(ipvs->lblcr_ctl_header);

	if (!net_eq(net, &init_net))
		kfree(ipvs->lblcr_ctl_table);
}
static struct pernet_operations ip_vs_lblcr_ops = {
	.init = __ip_vs_lblcr_init,
	.exit = __ip_vs_lblcr_exit,
};
static int __init ip_vs_lblcr_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_vs_lblcr_ops);
	if (ret)
		return ret;

	ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
	if (ret)
		unregister_pernet_subsys(&ip_vs_lblcr_ops);
	return ret;
}
static void __exit ip_vs_lblcr_cleanup(void)
{
	unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
	unregister_pernet_subsys(&ip_vs_lblcr_ops);
}


module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");