module, async: async_synchronize_full() on module init iff async is used
If the default iosched is built as a module, the kernel may deadlock while trying to load the iosched module on device probe if the probing was running off async. This is because async_synchronize_full() at the end of module init ends up waiting for the async job which initiated the module loading.

 async A                                modprobe

 1. finds a device
 2. registers the block device
 3. request_module(default iosched)
                                        4. modprobe in userland
                                        5. load and init module
                                        6. async_synchronize_full()

Async A waits for modprobe to finish in request_module() and modprobe waits for async A to finish in async_synchronize_full().

Because there's no easy way to track the dependency once control goes out to userland, implementing properly nested flushing is difficult. For now, make module init perform async_synchronize_full() iff module init has queued async jobs, as suggested by Linus.

This avoids the described deadlock because the iosched module doesn't use async and thus wouldn't invoke async_synchronize_full(). This is hacky and incomplete. It will deadlock if async module loading nests; however, this works around the known problem case and seems to be the best of bad options.

For more details, please refer to the following thread.

  http://thread.gmane.org/gmane.linux.kernel/1420814

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Alex Riesen <raa.lkml@gmail.com>
Tested-by: Ming Lei <ming.lei@canonical.com>
Tested-by: Alex Riesen <raa.lkml@gmail.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 406089d015
commit 774a1221e8
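Before the diff, here is a minimal userspace sketch of the pattern the patch introduces: a per-task flag is cleared before an init routine runs, set by the helper that queues background work, and checked afterwards so the flush is only performed when something was actually queued. This is an illustrative analog only, not kernel code; queue_async(), used_async and module_init_like() are made-up names standing in for async_schedule(), PF_USED_ASYNC and do_init_module().

    /* build with: cc -pthread sketch.c */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static __thread bool used_async;   /* analog of the PF_USED_ASYNC task flag */
    static pthread_t async_worker;
    static bool worker_started;

    static void *worker_fn(void *arg)
    {
        (void)arg;
        puts("background job running");
        return NULL;
    }

    /* analog of async_schedule(): queue background work and mark the caller */
    static void queue_async(void)
    {
        if (pthread_create(&async_worker, NULL, worker_fn, NULL) == 0) {
            worker_started = true;
            used_async = true;
        }
    }

    /* analog of do_init_module(): clear the flag, run init, flush only if needed */
    static void module_init_like(void (*init)(void))
    {
        used_async = false;
        init();
        if (used_async && worker_started) {
            pthread_join(async_worker, NULL);
            worker_started = false;
        }
    }

    static void init_without_async(void)
    {
        puts("plain init, nothing to wait for");
    }

    static void init_that_uses_async(void)
    {
        queue_async();
    }

    int main(void)
    {
        module_init_like(init_without_async);   /* skips the flush entirely */
        module_init_like(init_that_uses_async); /* waits, as before the patch */
        return 0;
    }

An iosched module corresponds to the first caller: it never queues async work, so nothing waits and the circular dependency described above cannot form.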
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1810,6 +1810,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
 #define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
 #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
+#define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -196,6 +196,9 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
 	atomic_inc(&entry_count);
 	spin_unlock_irqrestore(&async_lock, flags);
 
+	/* mark that this task has queued an async job, used by module init */
+	current->flags |= PF_USED_ASYNC;
+
 	/* schedule for execution */
 	queue_work(system_unbound_wq, &entry->work);
 
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3013,6 +3013,12 @@ static int do_init_module(struct module *mod)
 {
 	int ret = 0;
 
+	/*
+	 * We want to find out whether @mod uses async during init.  Clear
+	 * PF_USED_ASYNC.  async_schedule*() will set it.
+	 */
+	current->flags &= ~PF_USED_ASYNC;
+
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_COMING, mod);
 
@@ -3058,8 +3064,25 @@ static int do_init_module(struct module *mod)
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_LIVE, mod);
 
-	/* We need to finish all async code before the module init sequence is done */
-	async_synchronize_full();
+	/*
+	 * We need to finish all async code before the module init sequence
+	 * is done.  This has potential to deadlock.  For example, a newly
+	 * detected block device can trigger request_module() of the
+	 * default iosched from async probing task.  Once userland helper
+	 * reaches here, async_synchronize_full() will wait on the async
+	 * task waiting on request_module() and deadlock.
+	 *
+	 * This deadlock is avoided by performing async_synchronize_full()
+	 * iff module init queued any async jobs.  This isn't a full
+	 * solution as it will deadlock the same if module loading from
+	 * async jobs nests more than once; however, due to the various
+	 * constraints, this hack seems to be the best option for now.
+	 * Please refer to the following thread for details.
+	 *
+	 * http://thread.gmane.org/gmane.linux.kernel/1420814
+	 */
+	if (current->flags & PF_USED_ASYNC)
+		async_synchronize_full();
 
 	mutex_lock(&module_mutex);
 	/* Drop initial reference. */
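For context, a module whose init routine does queue async work still gets the old behaviour: async_schedule() (via __async_schedule() above) sets PF_USED_ASYNC on the init task, so do_init_module() keeps calling async_synchronize_full() for it. The sketch below is a hypothetical out-of-tree module written against the async API of this kernel generation; demo_async_probe() and demo_init() are invented names, not part of the patch.

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/async.h>
    #include <linux/printk.h>

    /* async callback; matches the async_func_ptr signature of this era */
    static void demo_async_probe(void *data, async_cookie_t cookie)
    {
        pr_info("demo: async probe running\n");
    }

    static int __init demo_init(void)
    {
        /*
         * __async_schedule() sets PF_USED_ASYNC on current here, so
         * do_init_module() will still run async_synchronize_full()
         * once this init function returns.
         */
        async_schedule(demo_async_probe, NULL);
        return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

A module like the default iosched, whose init never calls async_schedule*(), leaves PF_USED_ASYNC clear and therefore skips the flush, which is exactly what breaks the deadlock described in the commit message.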