Merge branch 'core/kprobes' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 0cc4bd8f70
@@ -612,6 +612,18 @@ void wait_for_kprobe_optimizer(void)
         mutex_unlock(&kprobe_mutex);
 }

+static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+{
+        struct optimized_kprobe *_op;
+
+        list_for_each_entry(_op, &unoptimizing_list, list) {
+                if (op == _op)
+                        return true;
+        }
+
+        return false;
+}
+
 /* Optimize kprobe if p is ready to be optimized */
 static void optimize_kprobe(struct kprobe *p)
 {
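The new optprobe_queued_unopt() helper walks unoptimizing_list and reports whether the given optimized_kprobe is already queued for unoptimization. Below is a minimal userspace sketch of that scan, assuming a simplified stand-in for the kernel's <linux/list.h> machinery; the mock_* names and the open-coded loop (instead of list_for_each_entry()) are purely illustrative.

/* Standalone sketch (not kernel code): models the scan in optprobe_queued_unopt(). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Mock of the optimized_kprobe linkage; only list membership matters here. */
struct mock_optprobe {
        int id;
        struct list_head list;
};

static struct list_head unoptimizing_list = LIST_HEAD_INIT(unoptimizing_list);

static void list_add_tail_mock(struct list_head *item, struct list_head *head)
{
        item->prev = head->prev;
        item->next = head;
        head->prev->next = item;
        head->prev = item;
}

/* Linear identity scan, as the new helper does with list_for_each_entry(). */
static bool mock_queued_unopt(struct mock_optprobe *op)
{
        struct list_head *pos;

        for (pos = unoptimizing_list.next; pos != &unoptimizing_list; pos = pos->next) {
                struct mock_optprobe *_op = container_of(pos, struct mock_optprobe, list);

                if (op == _op)
                        return true;
        }
        return false;
}

int main(void)
{
        struct mock_optprobe a = { .id = 1 }, b = { .id = 2 };

        list_add_tail_mock(&a.list, &unoptimizing_list);

        printf("probe %d queued: %d\n", a.id, mock_queued_unopt(&a));  /* 1 */
        printf("probe %d queued: %d\n", b.id, mock_queued_unopt(&b));  /* 0 */
        return 0;
}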
@@ -633,17 +645,21 @@ static void optimize_kprobe(struct kprobe *p)
                 return;

         /* Check if it is already optimized. */
-        if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+        if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
+                if (optprobe_queued_unopt(op)) {
+                        /* This is under unoptimizing. Just dequeue the probe */
+                        list_del_init(&op->list);
+                }
                 return;
+        }
         op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

-        if (!list_empty(&op->list))
-                /* This is under unoptimizing. Just dequeue the probe */
-                list_del_init(&op->list);
-        else {
-                list_add(&op->list, &optimizing_list);
-                kick_kprobe_optimizer();
-        }
+        /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
+        if (WARN_ON_ONCE(!list_empty(&op->list)))
+                return;
+
+        list_add(&op->list, &optimizing_list);
+        kick_kprobe_optimizer();
 }

 /* Short cut to direct unoptimizing */
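With this hunk, optimize_kprobe() treats an already-OPTIMIZED probe that is sitting on unoptimizing_list as a pending unoptimization to cancel (it is simply dequeued), and for a not-yet-optimized probe it now warns if the probe is still linked on any work list before queuing it for optimization. A rough decision sketch, assuming hypothetical state names (not kernel code):

/*
 * Rough decision sketch (not kernel code) for the reworked optimize_kprobe();
 * the struct and field names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct probe_state {
        bool optimized;         /* KPROBE_FLAG_OPTIMIZED is set */
        bool queued_unopt;      /* currently on unoptimizing_list */
        bool on_a_list;         /* op->list is non-empty */
};

static const char *optimize_action(struct probe_state s)
{
        if (s.optimized) {
                /* Already optimized: at most cancel a pending unoptimization. */
                return s.queued_unopt ? "dequeue from unoptimizing_list, done"
                                      : "already optimized, nothing to do";
        }
        /* The flag is set first; a leftover list linkage would be a bug. */
        if (s.on_a_list)
                return "set OPTIMIZED, WARN_ON_ONCE(stale list linkage), bail out";
        return "set OPTIMIZED, queue on optimizing_list, kick the optimizer";
}

int main(void)
{
        struct probe_state pending_unopt = { .optimized = true, .queued_unopt = true, .on_a_list = true };
        struct probe_state fresh = { 0 };

        printf("%s\n", optimize_action(pending_unopt));
        printf("%s\n", optimize_action(fresh));
        return 0;
}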
@@ -665,30 +681,33 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
                 return; /* This is not an optprobe nor optimized */

         op = container_of(p, struct optimized_kprobe, kp);
-        if (!kprobe_optimized(p)) {
-                /* Unoptimized or unoptimizing case */
-                if (force && !list_empty(&op->list)) {
-                        /*
-                         * Only if this is unoptimizing kprobe and forced,
-                         * forcibly unoptimize it. (No need to unoptimize
-                         * unoptimized kprobe again :)
-                         */
-                        list_del_init(&op->list);
-                        force_unoptimize_kprobe(op);
-                }
-                return;
-        }
+        if (!kprobe_optimized(p))
+                return;

-        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
         if (!list_empty(&op->list)) {
-                /* Dequeue from the optimization queue */
-                list_del_init(&op->list);
+                if (optprobe_queued_unopt(op)) {
+                        /* Queued in unoptimizing queue */
+                        if (force) {
+                                /*
+                                 * Forcibly unoptimize the kprobe here, and queue it
+                                 * in the freeing list for release afterwards.
+                                 */
+                                force_unoptimize_kprobe(op);
+                                list_move(&op->list, &freeing_list);
+                        }
+                } else {
+                        /* Dequeue from the optimizing queue */
+                        list_del_init(&op->list);
+                        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+                }
                 return;
         }
+
         /* Optimized kprobe case */
-        if (force)
+        if (force) {
                 /* Forcibly update the code: this is a special case */
                 force_unoptimize_kprobe(op);
-        else {
+        } else {
                 list_add(&op->list, &unoptimizing_list);
                 kick_kprobe_optimizer();
         }
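unoptimize_kprobe() gets the mirror-image treatment: a probe that is not optimized at all is ignored, a probe still sitting on one of the work queues is handled by adjusting the queues (with force short-circuiting a queued unoptimization and parking the probe on freeing_list), and only a live optimized probe is actually queued for, or forced through, unoptimization. A companion sketch under the same hypothetical naming (these functions live in kernel/kprobes.c in the mainline tree):

/*
 * Companion sketch (not kernel code) of the control flow in the reworked
 * unoptimize_kprobe(); the enum and function names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

enum unopt_state {
        QUEUED_FOR_UNOPT,       /* already on unoptimizing_list */
        QUEUED_FOR_OPT,         /* still waiting on optimizing_list */
        LIVE_OPTIMIZED,         /* optimized in the text, not on any list */
};

static const char *unoptimize_action(enum unopt_state s, bool force)
{
        switch (s) {
        case QUEUED_FOR_UNOPT:
                /* The optimizer will handle it; force short-circuits that. */
                return force ? "force_unoptimize now, move to freeing_list"
                             : "leave it queued";
        case QUEUED_FOR_OPT:
                /* Never reached the text: just cancel the pending optimization. */
                return "dequeue, clear OPTIMIZED flag";
        case LIVE_OPTIMIZED:
                return force ? "force_unoptimize now"
                             : "queue on unoptimizing_list, kick the optimizer";
        }
        return "unreachable";
}

int main(void)
{
        printf("%s\n", unoptimize_action(QUEUED_FOR_UNOPT, true));
        printf("%s\n", unoptimize_action(LIVE_OPTIMIZED, false));
        return 0;
}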
@@ -274,7 +274,8 @@ static int __init syscall_enter_define_fields(struct trace_event_call *call)
         struct syscall_trace_enter trace;
         struct syscall_metadata *meta = call->data;
         int offset = offsetof(typeof(trace), args);
-        int ret, i;
+        int ret = 0;
+        int i;

         for (i = 0; i < meta->nb_args; i++) {
                 ret = trace_define_field(call, meta->types[i],
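The trace_syscalls.c hunk splits "int ret, i;" so that ret starts at 0. The function ends with "return ret;" (not shown in the hunk), so for a system call with no arguments the loop body never runs and, without the initializer, an uninitialized value would be returned. A minimal standalone illustration of that hazard; the helper below is a stand-in, not the real trace_define_field():

/* Standalone illustration (not kernel code) of the zero-argument case. */
#include <stdio.h>

/* Stand-in for trace_define_field(); always succeeds here. */
static int define_one_field(int idx)
{
        (void)idx;
        return 0;
}

static int define_fields(int nb_args)
{
        int ret = 0;    /* the fix: with no args we now return 0, not garbage */
        int i;

        for (i = 0; i < nb_args; i++) {
                ret = define_one_field(i);
                if (ret)
                        break;
        }
        return ret;
}

int main(void)
{
        printf("0 args -> %d\n", define_fields(0));
        printf("3 args -> %d\n", define_fields(3));
        return 0;
}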