target: close target_put_sess_cmd() vs. core_tmr_abort_task() race
It is possible for one thread to take se_sess->sess_cmd_lock in core_tmr_abort_task() before taking a reference count on se_cmd->cmd_kref, while another thread in target_put_sess_cmd() drops se_cmd->cmd_kref before taking se_sess->sess_cmd_lock.

This introduces kref_put_spinlock_irqsave() and uses it in target_put_sess_cmd() to close the race window.

Signed-off-by: Joern Engel <joern@logfs.org>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
commit ccf5ae83a6 (parent a1321ddd27)
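For illustration, here is a minimal user-space analogue of the scheme this commit adopts (the cmd_put()/cmd_lookup_get() names and the toy struct cmd are invented for this sketch; C11 atomics and a pthread spinlock stand in for struct kref and the kernel spinlock). The point it demonstrates: the final 1 -> 0 transition of the refcount happens only while the list lock is held, so a scanner holding that same lock can never take a reference on an object whose count has already reached zero.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Toy stand-ins for se_cmd and sess_cmd_lock. */
struct cmd {
	atomic_int refcount;
	int on_list;            /* stands in for list membership */
};

static pthread_spinlock_t cmd_lock;

/*
 * Analogue of kref_put_spinlock_irqsave(): references are dropped on the
 * fast path only while at least two remain; the final decrement happens
 * with cmd_lock held, closing the window in which a scanner could still
 * find the object on the list after its refcount hit zero.
 */
static int cmd_put(struct cmd *c)
{
	int old = atomic_load(&c->refcount);

	while (old > 1) {
		/* like atomic_add_unless(&kref->refcount, -1, 1) */
		if (atomic_compare_exchange_weak(&c->refcount, &old, old - 1))
			return 0;
	}

	pthread_spin_lock(&cmd_lock);
	if (atomic_fetch_sub(&c->refcount, 1) == 1) {
		c->on_list = 0;                 /* list_del() under the lock */
		pthread_spin_unlock(&cmd_lock); /* "release" unlocks itself */
		free(c);
		return 1;
	}
	pthread_spin_unlock(&cmd_lock);
	return 0;
}

/*
 * Analogue of the core_tmr_abort_task() side: look up under the lock and
 * take a reference before dropping it.  Because the final put above also
 * unlinks under cmd_lock, an object found here is guaranteed live.  (The
 * real code walks sess_cmd_list, so freed commands are unreachable.)
 */
static struct cmd *cmd_lookup_get(struct cmd *c)
{
	struct cmd *found = NULL;

	pthread_spin_lock(&cmd_lock);
	if (c->on_list) {
		atomic_fetch_add(&c->refcount, 1);
		found = c;
	}
	pthread_spin_unlock(&cmd_lock);
	return found;
}

int main(void)
{
	struct cmd *c = calloc(1, sizeof(*c));

	pthread_spin_init(&cmd_lock, PTHREAD_PROCESS_PRIVATE);
	atomic_init(&c->refcount, 1);   /* list reference */
	c->on_list = 1;

	if (cmd_lookup_get(c))          /* 1 -> 2, under the lock */
		cmd_put(c);             /* 2 -> 1, lock-free fast path */
	cmd_put(c);                     /* 1 -> 0: unlink + free under lock */
	return 0;
}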
drivers/target/target_core_transport.c

@@ -2211,21 +2211,19 @@ static void target_release_cmd_kref(struct kref *kref)
 {
 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
 	struct se_session *se_sess = se_cmd->se_sess;
-	unsigned long flags;
 
-	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	if (list_empty(&se_cmd->se_cmd_list)) {
-		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+		spin_unlock(&se_sess->sess_cmd_lock);
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return;
 	}
 	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
-		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+		spin_unlock(&se_sess->sess_cmd_lock);
 		complete(&se_cmd->cmd_wait_comp);
 		return;
 	}
 	list_del(&se_cmd->se_cmd_list);
-	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+	spin_unlock(&se_sess->sess_cmd_lock);
 
 	se_cmd->se_tfo->release_cmd(se_cmd);
 }
@@ -2236,7 +2234,8 @@ static void target_release_cmd_kref(struct kref *kref)
  */
 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
 {
-	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
+	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
+			&se_sess->sess_cmd_lock);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
 
include/linux/kref.h

@@ -19,6 +19,7 @@
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
+#include <linux/spinlock.h>
 
 struct kref {
 	atomic_t refcount;
@@ -95,6 +96,38 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
 	return kref_sub(kref, 1, release);
 }
 
+/**
+ * kref_put_spinlock_irqsave - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ *	     last reference to the object is released.
+ *	     This pointer is required, and it is not acceptable to pass kfree
+ *	     in as this function.
+ * @lock: lock to take in release case
+ *
+ * Behaves identical to kref_put with one exception.  If the reference count
+ * drops to zero, the lock will be taken atomically wrt dropping the reference
+ * count.  The release function has to call spin_unlock() without _irqrestore.
+ */
+static inline int kref_put_spinlock_irqsave(struct kref *kref,
+		void (*release)(struct kref *kref),
+		spinlock_t *lock)
+{
+	unsigned long flags;
+
+	WARN_ON(release == NULL);
+	if (atomic_add_unless(&kref->refcount, -1, 1))
+		return 0;
+	spin_lock_irqsave(lock, flags);
+	if (atomic_dec_and_test(&kref->refcount)) {
+		release(kref);
+		local_irq_restore(flags);
+		return 1;
+	}
+	spin_unlock_irqrestore(lock, flags);
+	return 0;
+}
+
 static inline int kref_put_mutex(struct kref *kref,
 		void (*release)(struct kref *kref),
 		struct mutex *lock)
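A hypothetical usage sketch of the new helper (struct foo, foo_lock, and the foo_* functions are invented for illustration; only kref_put_spinlock_irqsave() itself comes from this commit). It shows the contract spelled out in the kernel-doc above: the release callback runs with the lock held and interrupts disabled, and must drop the lock with plain spin_unlock(), because the helper restores the interrupt state afterwards via local_irq_restore().

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	struct kref kref;
	struct list_head list;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

/* Called with foo_lock held and interrupts disabled by the helper. */
static void foo_release(struct kref *kref)
{
	struct foo *foo = container_of(kref, struct foo, kref);

	list_del(&foo->list);
	/* Plain spin_unlock(): the caller does local_irq_restore(). */
	spin_unlock(&foo_lock);
	kfree(foo);
}

static int foo_put(struct foo *foo)
{
	/* The last put unlinks and frees atomically wrt the 0 transition. */
	return kref_put_spinlock_irqsave(&foo->kref, foo_release, &foo_lock);
}

This mirrors the reworked target_release_cmd_kref() above, which likewise unlinks the command and calls spin_unlock() before handing the se_cmd back to the fabric driver's release_cmd().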