Commit 28fbc8b6 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Docbook fixes that make 99% of the diffstat, plus a oneliner fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Ensure update_cfs_shares() is called for parents of continuously-running tasks
  sched: Fix some kernel-doc warnings
parents bfd36050 bf0bd948
@@ -1532,6 +1532,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
  * Test if a process is not yet dead (at most zombie state)
  * If pid_alive fails, then pointers within the task structure
  * can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
  */
 static inline int pid_alive(struct task_struct *p)
 {
@@ -1543,6 +1545,8 @@ static inline int pid_alive(struct task_struct *p)
  * @tsk: Task structure to be checked.
  *
  * Check if a task structure is the first user space task the kernel created.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
  */
 static inline int is_global_init(struct task_struct *tsk)
 {
@@ -1894,6 +1898,8 @@ extern struct task_struct *idle_task(int cpu);
 /**
  * is_idle_task - is the specified task an idle task?
  * @p: the task in question.
+ *
+ * Return: 1 if @p is an idle task. 0 otherwise.
  */
 static inline bool is_idle_task(const struct task_struct *p)
 {
......
This diff is collapsed.
@@ -62,7 +62,7 @@ static int convert_prio(int prio)
  * any discrepancies created by racing against the uncertainty of the current
  * priority configuration.
  *
- * Returns: (int)bool - CPUs were found
+ * Return: (int)bool - CPUs were found
  */
 int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		struct cpumask *lowest_mask)
@@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
  * cpupri_init - initialize the cpupri structure
  * @cp: The cpupri context
  *
- * Returns: -ENOMEM if memory fails.
+ * Return: -ENOMEM on memory allocation failure.
  */
 int cpupri_init(struct cpupri *cp)
 {
......
@@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
  */
 	update_entity_load_avg(curr, 1);
 	update_cfs_rq_blocked_load(cfs_rq, 1);
+	update_cfs_shares(cfs_rq);
 #ifdef CONFIG_SCHED_HRTICK
 	/*
@@ -4280,6 +4281,8 @@ struct sg_lb_stats {
  * get_sd_load_idx - Obtain the load index for a given sched domain.
  * @sd: The sched_domain whose load_idx is to be obtained.
  * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
+ *
+ * Return: The load index.
  */
 static inline int get_sd_load_idx(struct sched_domain *sd,
 					enum cpu_idle_type idle)
@@ -4574,6 +4577,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
  *
  * Determine if @sg is a busier group than the previously selected
  * busiest group.
+ *
+ * Return: %true if @sg is a busier group than the previously selected
+ * busiest group. %false otherwise.
  */
 static bool update_sd_pick_busiest(struct lb_env *env,
 				   struct sd_lb_stats *sds,
@@ -4691,7 +4697,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
  * assuming lower CPU number will be equivalent to lower a SMT thread
  * number.
  *
- * Returns 1 when packing is required and a task should be moved to
+ * Return: 1 when packing is required and a task should be moved to
  * this CPU. The amount of the imbalance is returned in *imbalance.
  *
  * @env: The load balancing environment.
@@ -4869,7 +4875,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
  * @balance: Pointer to a variable indicating if this_cpu
  *	is the appropriate cpu to perform load balancing at this_level.
  *
- * Returns: - the busiest group if imbalance exists.
+ * Return: - The busiest group if imbalance exists.
  *   - If no imbalance and user has opted for power-savings balance,
  *      return the least loaded group whose CPUs can be
  *      put to idle by rebalancing its tasks onto our group.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment