IB/core: Don't register each MAD agent for LSM notifier

When creating many MAD agents in a short period of time, receive packet
processing can be delayed long enough to cause timeouts while new agents
are being added to the atomic notifier chain with IRQs disabled.  Notifier
chain registration and unregistration is an O(n) operation. With large
numbers of MAD agents being created and destroyed simultaneously, the CPUs
spend too much time with interrupts disabled.

Instead of each MAD agent registering for its own LSM notification,
maintain a list of agents internally and register once; this registration
already existed for handling the PKeys. This list is write-mostly, so a
normal spin lock is used instead of a read/write lock. All MAD agents must
be checked, so a single list is used instead of breaking them down per
device.

Notifier calls are done under rcu_read_lock, so there isn't a risk of
similar packet timeouts while checking the MAD agents' security settings
when notified.

Signed-off-by: Daniel Jurgens <danielj@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Acked-by: Paul Moore <paul@paul-moore.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
Daniel Jurgens 2019-02-02 11:09:45 +02:00 committed by Jason Gunthorpe
parent 805b754d49
commit c66f67414c
4 changed files with 34 additions and 23 deletions

View File

@ -202,6 +202,7 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
enum ib_qp_type qp_type);
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
void ib_mad_agent_security_change(void);
#else
static inline void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
@ -267,6 +268,10 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
{
return 0;
}
static inline void ib_mad_agent_security_change(void)
{
}
#endif
struct ib_device *ib_device_get_by_index(u32 ifindex);

View File

@ -452,6 +452,7 @@ static int ib_security_change(struct notifier_block *nb, unsigned long event,
return NOTIFY_DONE;
schedule_work(&ib_policy_change_work);
ib_mad_agent_security_change();
return NOTIFY_OK;
}

View File

@ -39,6 +39,10 @@
#include "core_priv.h"
#include "mad_priv.h"
static LIST_HEAD(mad_agent_list);
/* Lock to protect mad_agent_list */
static DEFINE_SPINLOCK(mad_agent_list_lock);
static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
struct pkey_index_qp_list *pkey = NULL;
@ -676,19 +680,18 @@ static int ib_security_pkey_access(struct ib_device *dev,
return security_ib_pkey_access(sec, subnet_prefix, pkey);
}
static int ib_mad_agent_security_change(struct notifier_block *nb,
unsigned long event,
void *data)
void ib_mad_agent_security_change(void)
{
struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);
struct ib_mad_agent *ag;
if (event != LSM_POLICY_CHANGE)
return NOTIFY_DONE;
ag->smp_allowed = !security_ib_endport_manage_subnet(
ag->security, dev_name(&ag->device->dev), ag->port_num);
return NOTIFY_OK;
spin_lock(&mad_agent_list_lock);
list_for_each_entry(ag,
&mad_agent_list,
mad_agent_sec_list)
WRITE_ONCE(ag->smp_allowed,
!security_ib_endport_manage_subnet(ag->security,
dev_name(&ag->device->dev), ag->port_num));
spin_unlock(&mad_agent_list_lock);
}
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
@ -699,6 +702,8 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
if (!rdma_protocol_ib(agent->device, agent->port_num))
return 0;
INIT_LIST_HEAD(&agent->mad_agent_sec_list);
ret = security_ib_alloc_security(&agent->security);
if (ret)
return ret;
@ -706,22 +711,20 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
if (qp_type != IB_QPT_SMI)
return 0;
spin_lock(&mad_agent_list_lock);
ret = security_ib_endport_manage_subnet(agent->security,
dev_name(&agent->device->dev),
agent->port_num);
if (ret)
goto free_security;
agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
ret = register_lsm_notifier(&agent->lsm_nb);
if (ret)
goto free_security;
agent->smp_allowed = true;
agent->lsm_nb_reg = true;
WRITE_ONCE(agent->smp_allowed, true);
list_add(&agent->mad_agent_sec_list, &mad_agent_list);
spin_unlock(&mad_agent_list_lock);
return 0;
free_security:
spin_unlock(&mad_agent_list_lock);
security_ib_free_security(agent->security);
return ret;
}
@ -731,8 +734,11 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
if (!rdma_protocol_ib(agent->device, agent->port_num))
return;
if (agent->lsm_nb_reg)
unregister_lsm_notifier(&agent->lsm_nb);
if (agent->qp->qp_type == IB_QPT_SMI) {
spin_lock(&mad_agent_list_lock);
list_del(&agent->mad_agent_sec_list);
spin_unlock(&mad_agent_list_lock);
}
security_ib_free_security(agent->security);
}
@ -743,7 +749,7 @@ int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
return 0;
if (map->agent.qp->qp_type == IB_QPT_SMI) {
if (!map->agent.smp_allowed)
if (!READ_ONCE(map->agent.smp_allowed))
return -EACCES;
return 0;
}

View File

@ -617,11 +617,10 @@ struct ib_mad_agent {
u32 hi_tid;
u32 flags;
void *security;
struct notifier_block lsm_nb;
struct list_head mad_agent_sec_list;
u8 port_num;
u8 rmpp_version;
bool smp_allowed;
bool lsm_nb_reg;
};
/**