kernel_optimize_test/net/ipv4/ipvs/ip_vs_wlc.c
/*
 * IPVS:        Weighted Least-Connection Scheduling module
 *
 * Version:     $Id: ip_vs_wlc.c,v 1.13 2003/04/18 09:03:16 wensong Exp $
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Peter Kese <peter.kese@ijs.si>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Wensong Zhang            :     changed the ip_vs_wlc_schedule to return dest
 *     Wensong Zhang            :     changed to use the inactconns in scheduling
 *     Wensong Zhang            :     changed some cosmetic things for debugging
 *     Wensong Zhang            :     changed for the d-linked destination list
 *     Wensong Zhang            :     added the ip_vs_wlc_update_svc
 *     Wensong Zhang            :     added any dest with weight=0 is quiesced
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/ip_vs.h>

static int
ip_vs_wlc_init_svc(struct ip_vs_service *svc)
{
	/* WLC keeps no per-service state, so there is nothing to set up */
	return 0;
}


static int
ip_vs_wlc_done_svc(struct ip_vs_service *svc)
{
	/* nothing to tear down */
	return 0;
}


static int
ip_vs_wlc_update_svc(struct ip_vs_service *svc)
{
	/* weights are read directly at schedule time, nothing to recompute */
	return 0;
}

static inline unsigned int
ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest)
{
	/*
	 * We think the overhead of processing active connections is 256
	 * times higher than that of inactive connections on average. (This
	 * 256 times might not be accurate; we will change it later.) We
	 * use the following formula to estimate the overhead now:
	 *		  dest->activeconns*256 + dest->inactconns
	 */
	return (atomic_read(&dest->activeconns) << 8) +
		atomic_read(&dest->inactconns);
}
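
/*
 * For example, a destination with 3 active and 10 inactive connections
 * gets an overhead of 3*256 + 10 = 778 from the formula above.
 */
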
/*
 *	Weighted Least Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
	struct ip_vs_dest *dest, *least;
	unsigned int loh, doh;

	IP_VS_DBG(6, "ip_vs_wlc_schedule(): Scheduling...\n");

	/*
	 * We calculate the load of each dest server as follows:
	 *		  (dest overhead) / dest->weight
	 *
	 * Remember -- no floats in kernel mode!!!
	 * The comparison of h1*w2 > h2*w1 is equivalent to that of
	 * h1/w1 > h2/w2 if every weight is larger than zero.
	 *
	 * The server with weight=0 is quiesced and will not receive any
	 * new connections.
	 */
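	/*
	 * For example, with loh = 778, least->weight = 1, doh = 900 and
	 * dest->weight = 2, the test 778*2 > 900*1 succeeds, so dest
	 * (load 900/2 = 450) replaces least (load 778/1 = 778).
	 */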
	list_for_each_entry(dest, &svc->destinations, n_list) {
		if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
		    atomic_read(&dest->weight) > 0) {
			least = dest;
			loh = ip_vs_wlc_dest_overhead(least);
			goto nextstage;
		}
	}
	return NULL;

	/*
	 *    Find the destination with the least load.
	 */
  nextstage:
	list_for_each_entry_continue(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;
		doh = ip_vs_wlc_dest_overhead(dest);
		if (loh * atomic_read(&dest->weight) >
		    doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG(6, "WLC: server %u.%u.%u.%u:%u "
		  "activeconns %d refcnt %d weight %d overhead %d\n",
		  NIPQUAD(least->addr), ntohs(least->port),
		  atomic_read(&least->activeconns),
		  atomic_read(&least->refcnt),
		  atomic_read(&least->weight), loh);

	return least;
}


static struct ip_vs_scheduler ip_vs_wlc_scheduler =
{
	.name =			"wlc",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
	.init_service =		ip_vs_wlc_init_svc,
	.done_service =		ip_vs_wlc_done_svc,
	.update_service =	ip_vs_wlc_update_svc,
	.schedule =		ip_vs_wlc_schedule,
};


static int __init ip_vs_wlc_init(void)
{
	INIT_LIST_HEAD(&ip_vs_wlc_scheduler.n_list);
	return register_ip_vs_scheduler(&ip_vs_wlc_scheduler);
}

static void __exit ip_vs_wlc_cleanup(void)
{
	unregister_ip_vs_scheduler(&ip_vs_wlc_scheduler);
}

module_init(ip_vs_wlc_init);
module_exit(ip_vs_wlc_cleanup);
MODULE_LICENSE("GPL");
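
/*
 * Usage sketch (addresses and weights are illustrative): once this module
 * is loaded, the "wlc" scheduler can be selected per virtual service from
 * user space with ipvsadm, for example:
 *
 *	ipvsadm -A -t 192.168.0.1:80 -s wlc
 *	ipvsadm -a -t 192.168.0.1:80 -r 10.0.0.2:80 -m -w 2
 *	ipvsadm -a -t 192.168.0.1:80 -r 10.0.0.3:80 -m -w 1
 *
 * Each new connection then goes to the real server with the smallest
 * (activeconns*256 + inactconns) to weight ratio.
 */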