Commit 499f7bb0, authored by Qiujun Huang, committed by Steven Rostedt (VMware)

tracing: Fix some typos in comments

s/wihin/within/
s/retrieven/retrieved/
s/suppport/support/
s/wil/will/
s/accidently/accidentally/
s/if the if the/if the/

Link: https://lkml.kernel.org/r/20201010140924.3809-1-hqjagain@gmail.com
Signed-off-by: Qiujun Huang <hqjagain@gmail.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent c1634097
...@@ -9465,7 +9465,7 @@ __init static int tracer_alloc_buffers(void) ...@@ -9465,7 +9465,7 @@ __init static int tracer_alloc_buffers(void)
} }
/* /*
* Make sure we don't accidently add more trace options * Make sure we don't accidentally add more trace options
* than we have bits for. * than we have bits for.
*/ */
BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE); BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
...@@ -9494,7 +9494,7 @@ __init static int tracer_alloc_buffers(void) ...@@ -9494,7 +9494,7 @@ __init static int tracer_alloc_buffers(void)
/* /*
* The prepare callbacks allocates some memory for the ring buffer. We * The prepare callbacks allocates some memory for the ring buffer. We
* don't free the buffer if the if the CPU goes down. If we were to free * don't free the buffer if the CPU goes down. If we were to free
* the buffer, then the user would lose any trace that was in the * the buffer, then the user would lose any trace that was in the
* buffer. The memory will be removed once the "instance" is removed. * buffer. The memory will be removed once the "instance" is removed.
*/ */
......
...@@ -246,7 +246,7 @@ typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data); ...@@ -246,7 +246,7 @@ typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
* tracing_snapshot_cond(tr, cond_data), the cond_data passed in is * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
* passed in turn to the cond_snapshot.update() function. That data * passed in turn to the cond_snapshot.update() function. That data
* can be compared by the update() implementation with the cond_data * can be compared by the update() implementation with the cond_data
* contained wihin the struct cond_snapshot instance associated with * contained within the struct cond_snapshot instance associated with
* the trace_array. Because the tr->max_lock is held throughout the * the trace_array. Because the tr->max_lock is held throughout the
* update() call, the update() function can directly retrieve the * update() call, the update() function can directly retrieve the
* cond_snapshot and cond_data associated with the per-instance * cond_snapshot and cond_data associated with the per-instance
...@@ -271,7 +271,7 @@ typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data); ...@@ -271,7 +271,7 @@ typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
* take the snapshot, by returning 'true' if so, 'false' if no * take the snapshot, by returning 'true' if so, 'false' if no
* snapshot should be taken. Because the max_lock is held for * snapshot should be taken. Because the max_lock is held for
* the duration of update(), the implementation is safe to * the duration of update(), the implementation is safe to
* directly retrieven and save any implementation data it needs * directly retrieved and save any implementation data it needs
* to in association with the snapshot. * to in association with the snapshot.
*/ */
struct cond_snapshot { struct cond_snapshot {
...@@ -573,7 +573,7 @@ struct tracer { ...@@ -573,7 +573,7 @@ struct tracer {
* The function callback, which can use the FTRACE bits to * The function callback, which can use the FTRACE bits to
* check for recursion. * check for recursion.
* *
* Now if the arch does not suppport a feature, and it calls * Now if the arch does not support a feature, and it calls
* the global list function which calls the ftrace callback * the global list function which calls the ftrace callback
* all three of these steps will do a recursion protection. * all three of these steps will do a recursion protection.
* There's no reason to do one if the previous caller already * There's no reason to do one if the previous caller already
...@@ -1479,7 +1479,7 @@ __trace_event_discard_commit(struct trace_buffer *buffer, ...@@ -1479,7 +1479,7 @@ __trace_event_discard_commit(struct trace_buffer *buffer,
/* /*
* Helper function for event_trigger_unlock_commit{_regs}(). * Helper function for event_trigger_unlock_commit{_regs}().
* If there are event triggers attached to this event that requires * If there are event triggers attached to this event that requires
* filtering against its fields, then they wil be called as the * filtering against its fields, then they will be called as the
* entry already holds the field information of the current event. * entry already holds the field information of the current event.
* *
* It also checks if the event should be discarded or not. * It also checks if the event should be discarded or not.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment