Commit c034f48e authored by Randy Dunlap, committed by Linus Torvalds

kernel: delete repeated words in comments

Drop repeated words in kernel/events/.
{if, the, that, with, time}

Drop repeated words in kernel/locking/.
{it, no, the}

Drop repeated words in kernel/sched/.
{in, not}

Link: https://lkml.kernel.org/r/20210127023412.26292-1-rdunlap@infradead.org
Signed-off-by: Randy Dunlap <rdunlap@infradead.org>
Acked-by: Will Deacon <will@kernel.org>	[kernel/locking/]
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Will Deacon <will@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e1e01411
@@ -269,7 +269,7 @@ static void event_function_call(struct perf_event *event, event_f func, void *da
 if (!event->parent) {
 /*
  * If this is a !child event, we must hold ctx::mutex to
- * stabilize the the event->ctx relation. See
+ * stabilize the event->ctx relation. See
  * perf_event_ctx_lock().
  */
 lockdep_assert_held(&ctx->mutex);
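
The rule in the comment above (hold ctx::mutex so the event->ctx relation cannot change) is backed by the lockdep_assert_held() call visible in the hunk. A minimal sketch of the same documentation-as-assertion pattern, using a hypothetical demo_obj type that is not part of this patch:

#include <linux/lockdep.h>
#include <linux/mutex.h>

/* Hypothetical object: the mutex stabilizes the obj->ctx pointer. */
struct demo_obj {
	struct mutex lock;
	void *ctx;
};

static void *demo_get_ctx(struct demo_obj *obj)
{
	/*
	 * Callers must hold obj->lock, otherwise obj->ctx could change
	 * underneath us.  On lockdep-enabled kernels this turns the
	 * documented rule into a runtime check; otherwise it compiles away.
	 */
	lockdep_assert_held(&obj->lock);

	return obj->ctx;
}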
@@ -1303,7 +1303,7 @@ static void put_ctx(struct perf_event_context *ctx)
  * life-time rules separate them. That is an exiting task cannot fork, and a
  * spawning task cannot (yet) exit.
  *
- * But remember that that these are parent<->child context relations, and
+ * But remember that these are parent<->child context relations, and
  * migration does not affect children, therefore these two orderings should not
  * interact.
  *
@@ -1442,7 +1442,7 @@ static u64 primary_event_id(struct perf_event *event)
 /*
  * Get the perf_event_context for a task and lock it.
  *
- * This has to cope with with the fact that until it is locked,
+ * This has to cope with the fact that until it is locked,
  * the context could get moved to another task.
  */
 static struct perf_event_context *
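
The comment this hunk touches describes a load-then-lock-then-recheck dance: the context pointer is sampled under RCU, locked, and re-read to make sure it did not move to another task in between. A hedged sketch of that generic pattern, using hypothetical demo_task/demo_ctx types rather than the real perf structures:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/* Illustrative types only -- not the perf_event_context machinery. */
struct demo_ctx {
	raw_spinlock_t lock;
};

struct demo_task {
	struct demo_ctx __rcu *ctx;
};

static struct demo_ctx *demo_lock_ctx(struct demo_task *task,
				      unsigned long *flags)
{
	struct demo_ctx *ctx;

retry:
	rcu_read_lock();
	ctx = rcu_dereference(task->ctx);
	if (ctx) {
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		/*
		 * Until the lock was taken, the context could have been
		 * moved to another task; re-check and retry if it was.
		 */
		if (ctx != rcu_dereference(task->ctx)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			rcu_read_unlock();
			goto retry;
		}
	}
	rcu_read_unlock();
	return ctx;	/* NULL, or locked with IRQ state saved in *flags */
}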
@@ -2486,7 +2486,7 @@ static void perf_set_shadow_time(struct perf_event *event,
  * But this is a bit hairy.
  *
  * So instead, we have an explicit cgroup call to remain
- * within the time time source all along. We believe it
+ * within the time source all along. We believe it
  * is cleaner and simpler to understand.
  */
 if (is_cgroup_event(event))
...
@@ -1733,7 +1733,7 @@ void uprobe_free_utask(struct task_struct *t)
 }
 /*
- * Allocate a uprobe_task object for the task if if necessary.
+ * Allocate a uprobe_task object for the task if necessary.
  * Called when the thread hits a breakpoint.
  *
  * Returns:
...
@@ -1420,7 +1420,7 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
 }
 /*
- * Performs the wakeup of the the top-waiter and re-enables preemption.
+ * Performs the wakeup of the top-waiter and re-enables preemption.
  */
 void rt_mutex_postunlock(struct wake_q_head *wake_q)
 {
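
rt_mutex_postunlock() is the tail end of a deferred-wakeup scheme: the top waiter is queued on a wake_q and preemption is held off while the lock's internal spinlock is still taken, and the actual wake_up_q() happens only after that lock is dropped. A rough sketch of the pattern with a hypothetical demo_lock, not the rtmutex code itself:

#include <linux/sched.h>
#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>

static void demo_unlock_and_wake(raw_spinlock_t *demo_lock,
				 struct task_struct *top_waiter)
{
	DEFINE_WAKE_Q(wake_q);
	unsigned long flags;

	raw_spin_lock_irqsave(demo_lock, flags);
	wake_q_add(&wake_q, top_waiter);	/* remember whom to wake, no wakeup yet */
	preempt_disable();			/* don't get preempted before the wakeup runs */
	raw_spin_unlock_irqrestore(demo_lock, flags);

	wake_up_q(&wake_q);			/* the actual wakeup, outside the lock */
	preempt_enable();
}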
@@ -1819,7 +1819,7 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
  * been started.
  * @waiter: the pre-initialized rt_mutex_waiter
  *
- * Wait for the the lock acquisition started on our behalf by
+ * Wait for the lock acquisition started on our behalf by
  * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
  * rt_mutex_cleanup_proxy_lock().
  *
...
@@ -1048,7 +1048,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 /*
  * If there were already threads queued before us and:
- * 1) there are no no active locks, wake the front
+ * 1) there are no active locks, wake the front
  *    queued process(es) as the handoff bit might be set.
  * 2) there are no active writers and some readers, the lock
  *    must be read owned; so we try to wake any read lock
...
@@ -119,7 +119,7 @@ EXPORT_SYMBOL(down_killable);
  * @sem: the semaphore to be acquired
  *
  * Try to acquire the semaphore atomically. Returns 0 if the semaphore has
- * been acquired successfully or 1 if it it cannot be acquired.
+ * been acquired successfully or 1 if it cannot be acquired.
  *
  * NOTE: This return value is inverted from both spin_trylock and
  * mutex_trylock! Be careful about this when converting code.
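
The inverted return value noted in this kernel-doc (0 on success, 1 on failure, the opposite of spin_trylock()/mutex_trylock()) is a classic conversion trap. A small hedged usage sketch, with a hypothetical demo_sem and helpers that are not part of this patch:

#include <linux/semaphore.h>
#include <linux/types.h>

/* Hypothetical semaphore guarding some resource. */
static struct semaphore demo_sem;

static void demo_setup(void)
{
	sema_init(&demo_sem, 1);	/* binary semaphore */
}

static bool demo_try_use_resource(void)
{
	/*
	 * down_trylock() returns 0 when the semaphore was taken and 1 when
	 * it could not be -- note the inversion relative to spin_trylock()
	 * and mutex_trylock(), which return non-zero on success.
	 */
	if (down_trylock(&demo_sem))
		return false;		/* busy, resource not acquired */

	/* ... use the resource ... */

	up(&demo_sem);
	return true;
}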
...
@@ -5126,7 +5126,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 /*
  * When a group wakes up we want to make sure that its quota is not already
  * expired/exceeded, otherwise it may be allowed to steal additional ticks of
- * runtime as update_curr() throttling can not not trigger until it's on-rq.
+ * runtime as update_curr() throttling can not trigger until it's on-rq.
  */
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 {
...
@@ -454,7 +454,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
 /*
  * For each cpu runqueue, if the task's mm match @mm, ensure that all
- * @mm's membarrier state set bits are also set in in the runqueue's
+ * @mm's membarrier state set bits are also set in the runqueue's
  * membarrier state. This ensures that a runqueue scheduling
  * between threads which are users of @mm has its membarrier state
  * updated.
...