Commit 2181d142 authored by Mauro Carvalho Chehab

Merge branch 'sched_warn_fix' into patchwork

* sched_warn_fix:
  sched: Fix compiler warnings
  x86, tsc: Fix cpufreq lockup
parents 419cb9b3 b6220ad6
@@ -275,7 +275,7 @@ void store_cpu_topology(unsigned int cpuid)
 		cpu_topology[cpuid].socket_id, mpidr);
 }
 
-static inline const int cpu_corepower_flags(void)
+static inline int cpu_corepower_flags(void)
 {
 	return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
 }
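All of the const-removal hunks in this merge address the same diagnostic: gcc ignores a const qualifier on a by-value return type and, with -Wextra, reports it via -Wignored-qualifiers. A minimal illustration (not part of the patch; the function names below are made up):

/* Built with gcc -Wextra, the first variant triggers
 * "warning: type qualifiers ignored on function return type
 * [-Wignored-qualifiers]"; dropping the qualifier silences it
 * without changing behaviour.
 */
static inline const int flags_with_warning(void)	/* warns */
{
	return 0x1;
}

static inline int flags_without_warning(void)		/* clean */
{
	return 0x1;
}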
@@ -747,7 +747,7 @@ int setup_profiling_timer(unsigned int multiplier)
 #ifdef CONFIG_SCHED_SMT
 /* cpumask of CPUs with asymetric SMT dependancy */
-static const int powerpc_smt_flags(void)
+static int powerpc_smt_flags(void)
 {
 	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
@@ -920,9 +920,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
 		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
 			mark_tsc_unstable("cpufreq changes");
-	}
 
-	set_cyc2ns_scale(tsc_khz, freq->cpu);
+		set_cyc2ns_scale(tsc_khz, freq->cpu);
+	}
 
 	return 0;
 }
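A simplified sketch of the resulting control flow (illustrative, not the upstream code verbatim): set_cyc2ns_scale() now runs only when tsc_khz is actually rescaled, instead of on every cpufreq notification, which is what the "x86, tsc: Fix cpufreq lockup" change is about:

/* Simplified tail of time_cpufreq_notifier() after the fix:
 * set_cyc2ns_scale() is reached only when tsc_khz really changed,
 * avoiding the superfluous calls previously made on every notification.
 */
if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
	tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
		mark_tsc_unstable("cpufreq changes");
	set_cyc2ns_scale(tsc_khz, freq->cpu);	/* moved inside the condition */
}

return 0;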
@@ -872,21 +872,21 @@ enum cpu_idle_type {
 #define SD_NUMA			0x4000	/* cross-node balancing */
 
 #ifdef CONFIG_SCHED_SMT
-static inline const int cpu_smt_flags(void)
+static inline int cpu_smt_flags(void)
 {
 	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
 #ifdef CONFIG_SCHED_MC
-static inline const int cpu_core_flags(void)
+static inline int cpu_core_flags(void)
 {
 	return SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
 #ifdef CONFIG_NUMA
-static inline const int cpu_numa_flags(void)
+static inline int cpu_numa_flags(void)
 {
 	return SD_NUMA;
 }
@@ -999,7 +999,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 bool cpus_share_cache(int this_cpu, int that_cpu);
 
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
-typedef const int (*sched_domain_flags_f)(void);
+typedef int (*sched_domain_flags_f)(void);
 
 #define SDTL_OVERLAP		0x01
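A minimal usage sketch (the example_* names are hypothetical; only sched_domain_flags_f and the SD_* idea come from the patch): topology levels store their flag callback through this typedef, so its return type has to agree with the now plain-int flag functions above, which is why the functions and the typedef are changed together:

/* Hypothetical, self-contained illustration of how the callback type
 * is consumed: the typedef and the flag functions must both return a
 * plain int for the assignment below to be warning-free.
 */
typedef int (*sched_domain_flags_f)(void);

static int example_smt_flags(void)		/* stands in for cpu_smt_flags() */
{
	return 0x1;				/* some SD_* bits */
}

struct example_topology_level {
	sched_domain_flags_f	sd_flags;	/* per-level flags callback */
};

static struct example_topology_level example_level = {
	.sd_flags	= example_smt_flags,
};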