f0b07bb151
rtnl_lock() is used everywhere, and contention is very high. When someone wants to iterate over the live net namespaces, there is currently no way to do that without taking the exclusive lock. But the exclusive rtnl_lock() in such places is overkill: it only increases the contention. Yes, there is already for_each_net_rcu() in the kernel, but it requires rcu_read_lock(), so the caller cannot sleep. Also, sometimes it is really necessary to prevent net_namespace_list growth, so for_each_net_rcu() does not fit there either. This patch introduces a new rw_semaphore, which will be used instead of rtnl_mutex to protect net_namespace_list. It is sleepable and allows non-exclusive iteration over the net namespace list. This makes it possible to stop using rtnl_lock() in several places (done in the next patches) and shortens the time rtnl_mutex is held. Here we just add the new lock; the explanation of why rtnl_lock() can be removed in those places is in the next patches. Fine-grained locks are generally better than one big lock, so let's do that with net_namespace_list while the situation allows it.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
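For illustration only, a minimal sketch (not part of the patch) of how a reader-side caller could walk the namespace list under the new semaphore instead of taking the exclusive rtnl_lock(). The helper walk_all_netns() and its callback are hypothetical; net_rwsem, for_each_net() and down_read()/up_read() are the symbols the patch relies on.

#include <linux/rwsem.h>
#include <net/net_namespace.h>

/* Hypothetical reader: visit every live net namespace without rtnl_lock(). */
static void walk_all_netns(void (*cb)(struct net *net))
{
	struct net *net;

	/* Readers may sleep and do not exclude each other. */
	down_read(&net_rwsem);
	for_each_net(net)
		cb(net);
	up_read(&net_rwsem);
}

Since namespace creation and destruction take the semaphore for writing, a reader holding it also blocks net_namespace_list growth for the duration, which is the case for_each_net_rcu() cannot cover.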
95 lines
2.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * SELinux support for the XFRM LSM hooks
 *
 * Author : Trent Jaeger, <jaegert@us.ibm.com>
 * Updated : Venkat Yekkirala, <vyekkirala@TrustedCS.com>
 */
#ifndef _SELINUX_XFRM_H_
#define _SELINUX_XFRM_H_

#include <net/flow.h>

int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
			      struct xfrm_user_sec_ctx *uctx,
			      gfp_t gfp);
int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
			      struct xfrm_sec_ctx **new_ctxp);
void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
int selinux_xfrm_state_alloc(struct xfrm_state *x,
			     struct xfrm_user_sec_ctx *uctx);
int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
				     struct xfrm_sec_ctx *polsec, u32 secid);
void selinux_xfrm_state_free(struct xfrm_state *x);
int selinux_xfrm_state_delete(struct xfrm_state *x);
int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
				      struct xfrm_policy *xp,
				      const struct flowi *fl);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
extern atomic_t selinux_xfrm_refcount;

static inline int selinux_xfrm_enabled(void)
{
	return (atomic_read(&selinux_xfrm_refcount) > 0);
}

int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
			      struct common_audit_data *ad);
int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
				struct common_audit_data *ad, u8 proto);
int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid);

static inline void selinux_xfrm_notify_policyload(void)
{
	struct net *net;

	rtnl_lock();
	down_read(&net_rwsem);
	for_each_net(net)
		rt_genid_bump_all(net);
	up_read(&net_rwsem);
	rtnl_unlock();
}
#else
static inline int selinux_xfrm_enabled(void)
{
	return 0;
}

static inline int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
					    struct common_audit_data *ad)
{
	return 0;
}

static inline int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
					      struct common_audit_data *ad,
					      u8 proto)
{
	return 0;
}

static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid,
					      int ckall)
{
	*sid = SECSID_NULL;
	return 0;
}

static inline void selinux_xfrm_notify_policyload(void)
{
}

static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
{
	*sid = SECSID_NULL;
	return 0;
}
#endif

#endif /* _SELINUX_XFRM_H_ */