kernel_optimize_test/security/selinux/ss/hashtab.c
Ondrej Mosnacek e0ac568de1 selinux: reduce the use of hard-coded hash sizes
Instead, allocate hash tables with just the right size, based on the
actual number of elements (which is almost always known beforehand; we
just need to defer the hashtab allocation to the right time). The only
case where we don't know the size (with the current policy format) is the
new filename transitions hashtable; there I just left the existing value.
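
A minimal sketch of the resulting pattern (count_of_elements, my_hash and
my_cmp are hypothetical placeholders, not the actual policydb code):

    u32 nel = count_of_elements;    /* known before anything is inserted */
    struct hashtab *t = hashtab_create(my_hash, my_cmp, nel);

    /* t is sized to roundup_pow_of_two(nel) buckets up front */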

After this patch, the time to load Fedora policy on x86_64 decreases
from 790 ms to 167 ms. If the unconfined module is removed, it decreases
from 750 ms to 122 ms. It is also likely that other operations are going
to be faster, mainly string_to_context_struct() or mls_compute_sid(),
but I didn't try to quantify that.

The memory usage of all hash table arrays increases from ~58 KB to
~163 KB (with Fedora policy on x86_64).

Signed-off-by: Ondrej Mosnacek <omosnace@redhat.com>
Acked-by: Stephen Smalley <sds@tycho.nsa.gov>
Signed-off-by: Paul Moore <paul@paul-moore.com>
2020-02-27 19:23:20 -05:00

// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the hash table type.
 *
 * Author : Stephen Smalley, <sds@tycho.nsa.gov>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include "hashtab.h"

static struct kmem_cache *hashtab_node_cachep;

/*
 * Here we simply round the number of elements up to the nearest power of two.
 * I also tried other options, such as rounding down or rounding to the
 * closest power of two (up or down, whichever is closer), but I was unable to
 * find any significant difference in lookup/insert performance that would
 * justify switching to a different (less intuitive) formula. It could be that
 * a different formula is actually more optimal, but any future changes here
 * should be supported with performance/memory usage data.
 *
 * The total memory used by the htable arrays (only) with Fedora policy loaded
 * is approximately 163 KB at the time of writing.
 */
static u32 hashtab_compute_size(u32 nel)
{
	return nel == 0 ? 0 : roundup_pow_of_two(nel);
}
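
/*
 * For illustration, the mapping above yields (per roundup_pow_of_two()):
 * nel = 0 -> size 0 (no bucket array is allocated at all),
 * nel = 5 -> size 8, nel = 64 -> size 64, nel = 100 -> size 128.
 */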

struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key),
			       int (*keycmp)(struct hashtab *h, const void *key1, const void *key2),
			       u32 nel_hint)
{
	struct hashtab *p;
	u32 i, size = hashtab_compute_size(nel_hint);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return p;

	p->size = size;
	p->nel = 0;
	p->hash_value = hash_value;
	p->keycmp = keycmp;
	if (!size)
		return p;

	p->htable = kmalloc_array(size, sizeof(*p->htable), GFP_KERNEL);
	if (!p->htable) {
		kfree(p);
		return NULL;
	}

	for (i = 0; i < size; i++)
		p->htable[i] = NULL;

	return p;
}

int hashtab_insert(struct hashtab *h, void *key, void *datum)
{
	u32 hvalue;
	struct hashtab_node *prev, *cur, *newnode;

	cond_resched();

	if (!h || !h->size || h->nel == HASHTAB_MAX_NODES)
		return -EINVAL;

	hvalue = h->hash_value(h, key);
	prev = NULL;
	cur = h->htable[hvalue];
	/* chains are kept sorted by keycmp() so scans can stop early */
	while (cur && h->keycmp(h, key, cur->key) > 0) {
		prev = cur;
		cur = cur->next;
	}

	if (cur && (h->keycmp(h, key, cur->key) == 0))
		return -EEXIST;

	newnode = kmem_cache_zalloc(hashtab_node_cachep, GFP_KERNEL);
	if (!newnode)
		return -ENOMEM;
	newnode->key = key;
	newnode->datum = datum;
	if (prev) {
		newnode->next = prev->next;
		prev->next = newnode;
	} else {
		newnode->next = h->htable[hvalue];
		h->htable[hvalue] = newnode;
	}

	h->nel++;
	return 0;
}

void *hashtab_search(struct hashtab *h, const void *key)
{
	u32 hvalue;
	struct hashtab_node *cur;

	if (!h || !h->size)
		return NULL;

	hvalue = h->hash_value(h, key);
	cur = h->htable[hvalue];
	while (cur && h->keycmp(h, key, cur->key) > 0)
		cur = cur->next;

	if (!cur || (h->keycmp(h, key, cur->key) != 0))
		return NULL;

	return cur->datum;
}
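
/*
 * Illustrative usage sketch (hypothetical helpers, not used by SELinux):
 * a table keyed by u32 values, only to demonstrate how hashtab_create(),
 * hashtab_insert() and hashtab_search() above fit together.
 */
static u32 example_hash(struct hashtab *h, const void *key)
{
	/* h->size is a power of two here, so masking picks a bucket */
	return *(const u32 *)key & (h->size - 1);
}

static int example_cmp(struct hashtab *h, const void *key1, const void *key2)
{
	u32 a = *(const u32 *)key1, b = *(const u32 *)key2;

	return (a > b) - (a < b);
}

static int __maybe_unused example_usage(u32 *key, void *datum)
{
	/* one element is expected, so pass 1 as the sizing hint */
	struct hashtab *h = hashtab_create(example_hash, example_cmp, 1);
	int rc;

	if (!h)
		return -ENOMEM;
	rc = hashtab_insert(h, key, datum);	/* returns -EEXIST on a duplicate key */
	if (!rc && hashtab_search(h, key) != datum)
		rc = -EINVAL;
	hashtab_destroy(h);
	return rc;
}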

void hashtab_destroy(struct hashtab *h)
{
	u32 i;
	struct hashtab_node *cur, *temp;

	if (!h)
		return;

	for (i = 0; i < h->size; i++) {
		cur = h->htable[i];
		while (cur) {
			temp = cur;
			cur = cur->next;
			kmem_cache_free(hashtab_node_cachep, temp);
		}
		h->htable[i] = NULL;
	}

	kfree(h->htable);
	h->htable = NULL;

	kfree(h);
}

int hashtab_map(struct hashtab *h,
		int (*apply)(void *k, void *d, void *args),
		void *args)
{
	u32 i;
	int ret;
	struct hashtab_node *cur;

	if (!h)
		return 0;

	for (i = 0; i < h->size; i++) {
		cur = h->htable[i];
		while (cur) {
			ret = apply(cur->key, cur->datum, args);
			if (ret)
				return ret;
			cur = cur->next;
		}
	}
	return 0;
}
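
/*
 * Illustrative sketch of a hashtab_map() callback (hypothetical, not used by
 * SELinux): counts entries through the opaque args pointer.
 */
static int __maybe_unused example_count_entry(void *k, void *d, void *args)
{
	(*(u32 *)args)++;
	return 0;	/* a nonzero return would stop the walk and be propagated */
}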

void hashtab_stat(struct hashtab *h, struct hashtab_info *info)
{
	u32 i, chain_len, slots_used, max_chain_len;
	struct hashtab_node *cur;

	slots_used = 0;
	max_chain_len = 0;
	for (i = 0; i < h->size; i++) {
		cur = h->htable[i];
		if (cur) {
			slots_used++;
			chain_len = 0;
			while (cur) {
				chain_len++;
				cur = cur->next;
			}

			if (chain_len > max_chain_len)
				max_chain_len = chain_len;
		}
	}

	info->slots_used = slots_used;
	info->max_chain_len = max_chain_len;
}
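
/*
 * Illustrative sketch (hypothetical, not used by SELinux): how a debug path
 * might report the numbers gathered by hashtab_stat().
 */
static void __maybe_unused example_print_stats(struct hashtab *h)
{
	struct hashtab_info info;

	hashtab_stat(h, &info);
	pr_debug("%u entries, %u/%u buckets used, longest chain %u\n",
		 h->nel, info.slots_used, h->size, info.max_chain_len);
}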

void __init hashtab_cache_init(void)
{
	hashtab_node_cachep = kmem_cache_create("hashtab_node",
						sizeof(struct hashtab_node),
						0, SLAB_PANIC, NULL);
}