trace: trivial fixes in comment typos.
Impact: clean up

Fixed several typos in the comments.

Signed-off-by: Wenji Huang <wenji.huang@oracle.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
This commit is contained in:
parent a81bd80a0b
commit 57794a9d48
@@ -140,7 +140,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
 #endif
 
 /**
- * ftrace_make_nop - convert code into top
+ * ftrace_make_nop - convert code into nop
  * @mod: module structure if called by module load initialization
  * @rec: the mcount call site record
  * @addr: the address that the call site should be calling
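The kernel-doc fixed above documents ftrace_make_nop(), the arch-level hook that dynamic ftrace uses to turn an instrumented mcount call site back into a nop. As a reading aid, here is the prototype that the three documented parameters imply; the exact parameter types are the usual ones from <linux/ftrace.h> and are an assumption here rather than part of this commit:

/*
 * Arch code rewrites the mcount call site recorded in @rec so that it
 * no longer calls @addr (the site becomes a nop). Per the kernel-doc
 * above, @mod identifies the module when this is called from module
 * load initialization.
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr);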
@@ -465,7 +465,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
-	 * it is enabled then disabled it.
+	 * it is enabled then disable it.
	 *
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
@@ -485,7 +485,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;
 
-		/* Record is not filtered and is not enabled do nothing */
+		/* Record is not filtered or enabled, do nothing */
		if (!fl)
			return 0;
 
@@ -507,7 +507,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 
	} else {
 
-		/* if record is not enabled do nothing */
+		/* if record is not enabled, do nothing */
		if (!(rec->flags & FTRACE_FL_ENABLED))
			return 0;
 
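The three hunks above all touch comments in __ftrace_replace_code(), which decides whether a call site record needs to be re-patched. The following is a condensed, illustrative sketch of the enable-path flag checks those corrected comments describe, assuming ftrace filtering is active; the helper name is made up for this illustration, and it is not the full in-tree function (the disable path, bookkeeping, and the actual text patching are omitted):

/*
 * Illustration only (hypothetical helper): condensed version of the
 * flag checks the comments above describe, assuming filtering is on.
 * Returns nonzero when the record's enabled state has to change.
 */
static int sketch_enable_checks(struct dyn_ftrace *rec)
{
	unsigned long fl;

	/* A notrace'd record must never stay enabled: disable it if it
	 * is on, otherwise there is nothing to do. */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (!(rec->flags & FTRACE_FL_ENABLED))
			return 0;
		rec->flags &= ~FTRACE_FL_ENABLED;
		return 1;	/* call site goes back to a nop */
	}

	fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

	/* Filtered and already enabled: nothing to change. */
	if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
		return 0;

	/* Record is not filtered or enabled, do nothing. */
	if (!fl)
		return 0;

	/* Exactly one flag is set: enable a newly filtered record, or
	 * disable one whose filter has been removed. */
	rec->flags ^= FTRACE_FL_ENABLED;
	return 1;
}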
@@ -616,12 +616,12 @@ extern struct tracer nop_trace;
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
- * To prevent this, we read the need_recshed flag before
+ * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag, if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
- * The rational for doing the above is that if need resched is set
+ * The rational for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
@@ -642,7 +642,7 @@ static inline int ftrace_preempt_disable(void)
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disabled saved the state of preemption.
- * If resched is set, then we were either inside an atomic or
+ * If resched is set, then we are either inside an atomic or
 * are inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
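The last two hunks correct the comment block documenting ftrace_preempt_disable()/ftrace_preempt_enable(), and that comment is the main explanation of the trick: sample need_resched() before disabling preemption, then pick the matching enable variant when preemption is re-enabled. A minimal sketch of the documented pattern follows; it is not a copy of the in-tree helpers, which may differ in details such as using the notrace variants of the preempt macros:

/*
 * Minimal sketch of the pattern the fixed comments describe: remember
 * whether a reschedule was already pending before preemption was
 * disabled, and use that to pick the enable variant afterwards.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched = need_resched();	/* reschedule already pending? */

	preempt_disable();
	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		/*
		 * need_resched was set before preemption was disabled,
		 * so we are either in an atomic section or inside the
		 * scheduler; do not trigger a reschedule from here.
		 */
		preempt_enable_no_resched();
	else
		preempt_enable();
}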