IPVS: netns awareness for the lblcr scheduler
The variable sysctl_ip_vs_lblcr_expiration is moved into the per-netns ipvs struct as sysctl_lblcr_expiration, and the procfs/sysctl handling is updated to use it.

Signed-off-by: Hans Schillstrom <hans.schillstrom@ericsson.com>
Acked-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Simon Horman <horms@verge.net.au>
parent fc723250c9
commit d0a1eef9c3
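For orientation before the diff: the change follows the common per-netns sysctl pattern, i.e. keep a template ctl_table, give every non-init netns its own kmemdup() copy, point the entry's .data at the per-netns field, and register/unregister the table from the pernet init/exit hooks. The sketch below is condensed from the patch itself; the my_* names are placeholders rather than real kernel symbols (the actual code is __ip_vs_lblcr_init()/__ip_vs_lblcr_exit() further down), and the listed headers are the ones assumed to provide net_ipvs(), net_vs_ctl_path and struct netns_ipvs.

/*
 * Illustrative sketch only, condensed from the patch below.
 * my_* names are placeholders, not kernel symbols.
 */
#include <linux/slab.h>		/* kmemdup(), kfree() */
#include <linux/sysctl.h>	/* proc_dointvec_jiffies() */
#include <net/ip_vs.h>		/* net_ipvs(), net_vs_ctl_path, struct netns_ipvs */

static struct ctl_table my_vars_table[] = {
	{
		.procname	= "lblcr_expiration",
		.data		= NULL,		/* filled in per netns in my_init() */
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init my_init(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	/* init_net may use the static template; every other netns gets a copy */
	if (!net_eq(net, &init_net)) {
		ipvs->lblcr_ctl_table = kmemdup(my_vars_table,
						sizeof(my_vars_table),
						GFP_KERNEL);
		if (ipvs->lblcr_ctl_table == NULL)
			return -ENOMEM;
	} else
		ipvs->lblcr_ctl_table = my_vars_table;

	/* per-netns default; .data now points into this netns' ipvs struct */
	ipvs->sysctl_lblcr_expiration = 24*60*60*HZ;
	ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;

	ipvs->lblcr_ctl_header =
		register_net_sysctl_table(net, net_vs_ctl_path,
					  ipvs->lblcr_ctl_table);
	if (!ipvs->lblcr_ctl_header) {
		if (!net_eq(net, &init_net))
			kfree(ipvs->lblcr_ctl_table);
		return -ENOMEM;
	}
	return 0;
}

static void __net_exit my_exit(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	unregister_net_sysctl_table(ipvs->lblcr_ctl_header);
	if (!net_eq(net, &init_net))
		kfree(ipvs->lblcr_ctl_table);
}

With this in place, each network namespace gets its own lblcr_expiration sysctl value instead of sharing a single global variable.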
--- a/include/net/netns/ip_vs.h
+++ b/include/net/netns/ip_vs.h
@@ -28,6 +28,11 @@ struct netns_ipvs {
 	#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
 	struct list_head	rs_table[IP_VS_RTAB_SIZE];
 
+	/* ip_vs_lblcr */
+	int			sysctl_lblcr_expiration;
+	struct ctl_table_header	*lblcr_ctl_header;
+	struct ctl_table	*lblcr_ctl_table;
+
 };
 
 #endif /* IP_VS_H_ */
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -70,8 +70,6 @@
  * entries that haven't been touched for a day.
  */
 #define COUNT_FOR_FULL_EXPIRATION   30
-static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
-
 
 /*
  *     for IPVS lblcr entry hash table
@@ -296,7 +294,7 @@ struct ip_vs_lblcr_table {
 static ctl_table vs_vars_table[] = {
 	{
 		.procname	= "lblcr_expiration",
-		.data		= &sysctl_ip_vs_lblcr_expiration,
+		.data		= NULL,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
@@ -304,8 +302,6 @@ static ctl_table vs_vars_table[] = {
 	{ }
 };
 
-static struct ctl_table_header * sysctl_header;
-
 static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
 {
 	list_del(&en->list);
@@ -425,14 +421,15 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
 	unsigned long now = jiffies;
 	int i, j;
 	struct ip_vs_lblcr_entry *en, *nxt;
+	struct netns_ipvs *ipvs = net_ipvs(svc->net);
 
 	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
 		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
 
 		write_lock(&svc->sched_lock);
 		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
-			if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
-				       now))
+			if (time_after(en->lastuse
+					+ ipvs->sysctl_lblcr_expiration, now))
 				continue;
 
 			ip_vs_lblcr_free(en);
@@ -664,6 +661,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	read_lock(&svc->sched_lock);
 	en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr);
 	if (en) {
+		struct netns_ipvs *ipvs = net_ipvs(svc->net);
 		/* We only hold a read lock, but this is atomic */
 		en->lastuse = jiffies;
 
@@ -675,7 +673,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		/* More than one destination + enough time passed by, cleanup */
 		if (atomic_read(&en->set.size) > 1 &&
 		    time_after(jiffies, en->set.lastmod +
-				sysctl_ip_vs_lblcr_expiration)) {
+				ipvs->sysctl_lblcr_expiration)) {
 			struct ip_vs_dest *m;
 
 			write_lock(&en->set.lock);
@@ -749,23 +747,43 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
  */
 static int __net_init __ip_vs_lblcr_init(struct net *net)
 {
-	if (!net_eq(net, &init_net))	/* netns not enabled yet */
-		return -EPERM;
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	sysctl_header = register_net_sysctl_table(net, net_vs_ctl_path,
-						   vs_vars_table);
-	if (!sysctl_header)
-		return -ENOMEM;
+	if (!net_eq(net, &init_net)) {
+		ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
+						sizeof(vs_vars_table),
+						GFP_KERNEL);
+		if (ipvs->lblcr_ctl_table == NULL)
+			goto err_dup;
+	} else
+		ipvs->lblcr_ctl_table = vs_vars_table;
+	ipvs->sysctl_lblcr_expiration = 24*60*60*HZ;
+	ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
+
+	ipvs->lblcr_ctl_header =
+		register_net_sysctl_table(net, net_vs_ctl_path,
+					  ipvs->lblcr_ctl_table);
+	if (!ipvs->lblcr_ctl_header)
+		goto err_reg;
 
 	return 0;
+
+err_reg:
+	if (!net_eq(net, &init_net))
+		kfree(ipvs->lblcr_ctl_table);
+
+err_dup:
+	return -ENOMEM;
 }
 
 static void __net_exit __ip_vs_lblcr_exit(struct net *net)
 {
-	if (!net_eq(net, &init_net))	/* netns not enabled yet */
-		return;
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	unregister_net_sysctl_table(sysctl_header);
+	unregister_net_sysctl_table(ipvs->lblcr_ctl_header);
+
+	if (!net_eq(net, &init_net))
+		kfree(ipvs->lblcr_ctl_table);
 }
 
 static struct pernet_operations ip_vs_lblcr_ops = {