forked from luck/tmp_suning_uos_patched
proc: turn signal_struct->count into "int nr_threads"
No functional changes, just s/atomic_t count/int nr_threads/. With the recent changes this counter has a single user, get_nr_threads(). And, none of its callers need the really accurate number of threads, not to mention each caller obviously races with fork/exit. It is only used to report this value to the user-space, except first_tid() uses it to avoid the unnecessary while_each_thread() loop in the unlikely case. It is a bit sad we need a word in struct signal_struct for this; perhaps we can change get_nr_threads() to approximate the number of threads using signal->live and kill ->nr_threads later. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Oleg Nesterov <oleg@redhat.com> Cc: Alexey Dobriyan <adobriyan@gmail.com> Cc: "Eric W. Biederman" <ebiederm@xmission.com> Acked-by: Roland McGrath <roland@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
dd98acf747
commit
b3ac022cb9
|
@ -16,7 +16,7 @@ extern struct files_struct init_files;
|
||||||
extern struct fs_struct init_fs;
|
extern struct fs_struct init_fs;
|
||||||
|
|
||||||
#define INIT_SIGNALS(sig) { \
|
#define INIT_SIGNALS(sig) { \
|
||||||
.count = ATOMIC_INIT(1), \
|
.nr_threads = 1, \
|
||||||
.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
|
.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
|
||||||
.shared_pending = { \
|
.shared_pending = { \
|
||||||
.list = LIST_HEAD_INIT(sig.shared_pending.list), \
|
.list = LIST_HEAD_INIT(sig.shared_pending.list), \
|
||||||
|
|
|
@ -527,8 +527,8 @@ struct thread_group_cputimer {
|
||||||
*/
|
*/
|
||||||
struct signal_struct {
|
struct signal_struct {
|
||||||
atomic_t sigcnt;
|
atomic_t sigcnt;
|
||||||
atomic_t count;
|
|
||||||
atomic_t live;
|
atomic_t live;
|
||||||
|
int nr_threads;
|
||||||
|
|
||||||
wait_queue_head_t wait_chldexit; /* for wait4() */
|
wait_queue_head_t wait_chldexit; /* for wait4() */
|
||||||
|
|
||||||
|
@ -2149,7 +2149,7 @@ extern bool current_is_single_threaded(void);
|
||||||
|
|
||||||
static inline int get_nr_threads(struct task_struct *tsk)
|
static inline int get_nr_threads(struct task_struct *tsk)
|
||||||
{
|
{
|
||||||
return atomic_read(&tsk->signal->count);
|
return tsk->signal->nr_threads;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* de_thread depends on thread_group_leader not being a pid based check */
|
/* de_thread depends on thread_group_leader not being a pid based check */
|
||||||
|
|
|
@ -83,14 +83,10 @@ static void __exit_signal(struct task_struct *tsk)
|
||||||
struct sighand_struct *sighand;
|
struct sighand_struct *sighand;
|
||||||
struct tty_struct *uninitialized_var(tty);
|
struct tty_struct *uninitialized_var(tty);
|
||||||
|
|
||||||
BUG_ON(!sig);
|
|
||||||
BUG_ON(!atomic_read(&sig->count));
|
|
||||||
|
|
||||||
sighand = rcu_dereference_check(tsk->sighand,
|
sighand = rcu_dereference_check(tsk->sighand,
|
||||||
rcu_read_lock_held() ||
|
rcu_read_lock_held() ||
|
||||||
lockdep_tasklist_lock_is_held());
|
lockdep_tasklist_lock_is_held());
|
||||||
spin_lock(&sighand->siglock);
|
spin_lock(&sighand->siglock);
|
||||||
atomic_dec(&sig->count);
|
|
||||||
|
|
||||||
posix_cpu_timers_exit(tsk);
|
posix_cpu_timers_exit(tsk);
|
||||||
if (group_dead) {
|
if (group_dead) {
|
||||||
|
@ -130,6 +126,7 @@ static void __exit_signal(struct task_struct *tsk)
|
||||||
sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
|
sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
sig->nr_threads--;
|
||||||
__unhash_process(tsk, group_dead);
|
__unhash_process(tsk, group_dead);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
@ -877,9 +877,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
|
||||||
if (!sig)
|
if (!sig)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
atomic_set(&sig->sigcnt, 1);
|
sig->nr_threads = 1;
|
||||||
atomic_set(&sig->count, 1);
|
|
||||||
atomic_set(&sig->live, 1);
|
atomic_set(&sig->live, 1);
|
||||||
|
atomic_set(&sig->sigcnt, 1);
|
||||||
init_waitqueue_head(&sig->wait_chldexit);
|
init_waitqueue_head(&sig->wait_chldexit);
|
||||||
if (clone_flags & CLONE_NEWPID)
|
if (clone_flags & CLONE_NEWPID)
|
||||||
sig->flags |= SIGNAL_UNKILLABLE;
|
sig->flags |= SIGNAL_UNKILLABLE;
|
||||||
|
@ -1256,9 +1256,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (clone_flags & CLONE_THREAD) {
|
if (clone_flags & CLONE_THREAD) {
|
||||||
atomic_inc(&current->signal->sigcnt);
|
current->signal->nr_threads++;
|
||||||
atomic_inc(&current->signal->count);
|
|
||||||
atomic_inc(&current->signal->live);
|
atomic_inc(&current->signal->live);
|
||||||
|
atomic_inc(&current->signal->sigcnt);
|
||||||
p->group_leader = current->group_leader;
|
p->group_leader = current->group_leader;
|
||||||
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
|
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue
Block a user