kernel_optimize_test/security/selinux/ss/sidtab.h
Stephen Smalley 02a52c5c8c selinux: move policy commit after updating selinuxfs
With the refactoring of the policy load logic in the security
server from the previous change, it is now possible to split out
the committing of the new policy from security_load_policy() and
perform it only after successful updating of selinuxfs.  Change
security_load_policy() to return the newly populated policy
data structures to the caller, export selinux_policy_commit()
for external callers, and introduce selinux_policy_cancel() to
provide a way to cancel the policy load in the event of an error
during updating of the selinuxfs directory tree.  Further, rework
the interfaces used by selinuxfs to get information from the policy
when creating the new directory tree to take and act upon the
new policy data structure rather than the current/active policy.
Update selinuxfs to use these updated and new interfaces.  While
we are here, stop re-creating the policy_capabilities directory
on each policy load since it does not depend on the policy, and
stop trying to create the booleans and classes directories during
the initial creation of selinuxfs since no information is available
until first policy load.

After this change, a failure while updating the booleans and class
directories will cause the entire policy load to be canceled, leaving
the original policy intact, and policy load notifications to userspace
will only happen after a successful completion of updating those
directories.  This does not (yet) provide full atomicity with respect
to the updating of the directory trees themselves.
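
A rough sketch of the resulting load path as seen from selinuxfs
(illustrative only; the exact signatures and the helper name
sel_make_policy_nodes() are assumptions, not verbatim from this change):

    struct selinux_policy *newpolicy = NULL;

    rc = security_load_policy(state, data, len, &newpolicy);
    if (rc)
            return rc;      /* parse/convert failed; old policy untouched */

    rc = sel_make_policy_nodes(fsi, newpolicy);     /* booleans, classes */
    if (rc) {
            selinux_policy_cancel(state, newpolicy);  /* keep old policy */
            return rc;
    }

    selinux_policy_commit(state, newpolicy);  /* swap in + notify userspace */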

Signed-off-by: Stephen Smalley <stephen.smalley.work@gmail.com>
Signed-off-by: Paul Moore <paul@paul-moore.com>
2020-08-17 20:50:22 -04:00

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A security identifier table (sidtab) is a lookup table
 * of security context structures indexed by SID value.
 *
 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
 *
 * Copyright (C) 2018 Red Hat, Inc.
 */
#ifndef _SS_SIDTAB_H_
#define _SS_SIDTAB_H_

#include <linux/spinlock_types.h>
#include <linux/log2.h>
#include <linux/hashtable.h>

#include "context.h"

struct sidtab_entry {
	u32 sid;
	u32 hash;
	struct context context;
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	struct sidtab_str_cache __rcu *cache;
#endif
	struct hlist_node list;
};
union sidtab_entry_inner {
	struct sidtab_node_inner *ptr_inner;
	struct sidtab_node_leaf *ptr_leaf;
};

/* align node size to page boundary */
#define SIDTAB_NODE_ALLOC_SHIFT PAGE_SHIFT
#define SIDTAB_NODE_ALLOC_SIZE PAGE_SIZE

#define size_to_shift(size) ((size) == 1 ? 1 : (const_ilog2((size) - 1) + 1))

#define SIDTAB_INNER_SHIFT \
	(SIDTAB_NODE_ALLOC_SHIFT - size_to_shift(sizeof(union sidtab_entry_inner)))
#define SIDTAB_INNER_ENTRIES ((size_t)1 << SIDTAB_INNER_SHIFT)
#define SIDTAB_LEAF_ENTRIES \
	(SIDTAB_NODE_ALLOC_SIZE / sizeof(struct sidtab_entry))

#define SIDTAB_MAX_BITS 32
#define SIDTAB_MAX U32_MAX
/* ensure enough tree levels for SIDTAB_MAX entries */
#define SIDTAB_MAX_LEVEL \
	DIV_ROUND_UP(SIDTAB_MAX_BITS - size_to_shift(SIDTAB_LEAF_ENTRIES), \
		     SIDTAB_INNER_SHIFT)
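
/*
 * Worked example (illustrative, not a guaranteed configuration): on a
 * 64-bit build with 4 KiB pages, sizeof(union sidtab_entry_inner) is 8,
 * so size_to_shift(8) == 3 and SIDTAB_INNER_SHIFT == 12 - 3 == 9, giving
 * SIDTAB_INNER_ENTRIES == 512 child slots per inner node.  If
 * sizeof(struct sidtab_entry) were, say, 64 bytes, SIDTAB_LEAF_ENTRIES
 * would be 4096 / 64 == 64 and SIDTAB_MAX_LEVEL would be
 * DIV_ROUND_UP(32 - 6, 9) == 3.
 */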
struct sidtab_node_leaf {
	struct sidtab_entry entries[SIDTAB_LEAF_ENTRIES];
};

struct sidtab_node_inner {
	union sidtab_entry_inner entries[SIDTAB_INNER_ENTRIES];
};

struct sidtab_isid_entry {
	int set;
	struct sidtab_entry entry;
};

struct sidtab_convert_params {
	int (*func)(struct context *oldc, struct context *newc, void *args);
	void *args;
	struct sidtab *target;
};
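
/*
 * Intended use (assumed from the conversion API; not spelled out in this
 * header): during policy reload the caller fills in sidtab_convert_params
 * with a callback that translates each live entry's context under the new
 * policy and a target sidtab to receive the results, then passes it to
 * sidtab_convert().  sidtab_cancel_convert() backs out an in-progress
 * conversion if the reload is aborted.
 */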
#define SIDTAB_HASH_BITS CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS
#define SIDTAB_HASH_BUCKETS (1 << SIDTAB_HASH_BITS)

struct sidtab {
	/*
	 * lock-free read access only for as many items as a prior read of
	 * 'count'
	 */
	union sidtab_entry_inner roots[SIDTAB_MAX_LEVEL + 1];
	/*
	 * access atomically via {READ|WRITE}_ONCE(); only increment under
	 * spinlock
	 */
	u32 count;
	/* access only under spinlock */
	struct sidtab_convert_params *convert;
	spinlock_t lock;

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	/* SID -> context string cache */
	u32 cache_free_slots;
	struct list_head cache_lru_list;
	spinlock_t cache_lock;
#endif

	/* index == SID - 1 (no entry for SECSID_NULL) */
	struct sidtab_isid_entry isids[SECINITSID_NUM];

	/* Hash table for fast reverse context-to-sid lookups. */
	DECLARE_HASHTABLE(context_to_sid, SIDTAB_HASH_BITS);
};
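
/*
 * Illustrative reader pattern implied by the comments above (hypothetical,
 * not code from this file): a lock-free reader first snapshots the entry
 * count and only then walks the tree for indices below it, e.g.:
 *
 *	u32 count = READ_ONCE(s->count);
 *
 *	if (index < count)
 *		... walk s->roots ...	// safe without taking s->lock
 *
 * Writers append entries and publish them by bumping 'count' with
 * WRITE_ONCE() under s->lock, so a reader never sees a partially
 * initialized entry at an index below its snapshot.
 */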
int sidtab_init(struct sidtab *s);
int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context);

struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid);
struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid);

static inline struct context *sidtab_search(struct sidtab *s, u32 sid)
{
	struct sidtab_entry *entry = sidtab_search_entry(s, sid);

	return entry ? &entry->context : NULL;
}

static inline struct context *sidtab_search_force(struct sidtab *s, u32 sid)
{
	struct sidtab_entry *entry = sidtab_search_entry_force(s, sid);

	return entry ? &entry->context : NULL;
}

int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params);
void sidtab_cancel_convert(struct sidtab *s);

int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid);

void sidtab_destroy(struct sidtab *s);

int sidtab_hash_stats(struct sidtab *sidtab, char *page);
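
/*
 * Example round trip (hypothetical caller, for illustration only):
 *
 *	u32 sid;
 *	struct context *ctx;
 *
 *	if (sidtab_context_to_sid(s, ctx_in, &sid))
 *		return rc;			// lookup-or-insert failed
 *	ctx = sidtab_search(s, sid);		// NULL if the SID is unknown
 *
 * The _force variants also return entries whose contexts are invalid under
 * the current policy (assumed from the naming; see sidtab.c).
 */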
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
			const char *str, u32 str_len);
int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
		       char **out, u32 *out_len);
#else
static inline void sidtab_sid2str_put(struct sidtab *s,
				      struct sidtab_entry *entry,
				      const char *str, u32 str_len)
{
}

static inline int sidtab_sid2str_get(struct sidtab *s,
				     struct sidtab_entry *entry,
				     char **out, u32 *out_len)
{
	return -ENOENT;
}
#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */

#endif /* _SS_SIDTAB_H_ */