Commit c6a597fc authored by Linus Torvalds

Merge tag 'loongarch-fixes-6.8-3' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:
 "Fix two cpu-hotplug issues, fix the init sequence about FDT system,
  fix the coding style of dts, and fix the wrong CPUCFG ID handling of
  KVM"

* tag 'loongarch-fixes-6.8-3' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  LoongArch: KVM: Streamline kvm_check_cpucfg() and improve comments
  LoongArch: KVM: Rename _kvm_get_cpucfg() to _kvm_get_cpucfg_mask()
  LoongArch: KVM: Fix input validation of _kvm_get_cpucfg() & kvm_check_cpucfg()
  LoongArch: dts: Minor whitespace cleanup
  LoongArch: Call early_init_fdt_scan_reserved_mem() earlier
  LoongArch: Update cpu_sibling_map when disabling nonboot CPUs
  LoongArch: Disable IRQ before init_fn() for nonboot CPUs
parents 603c04e2 f0f5c489
@@ -60,7 +60,7 @@ &i2c0 {
         #address-cells = <1>;
         #size-cells = <0>;
-        eeprom@57{
+        eeprom@57 {
                 compatible = "atmel,24c16";
                 reg = <0x57>;
                 pagesize = <16>;
@@ -78,7 +78,7 @@ &i2c2 {
         #address-cells = <1>;
         #size-cells = <0>;
-        eeprom@57{
+        eeprom@57 {
                 compatible = "atmel,24c16";
                 reg = <0x57>;
                 pagesize = <16>;
@@ -357,6 +357,8 @@ void __init platform_init(void)
         acpi_gbl_use_default_register_widths = false;
         acpi_boot_table_init();
 #endif
+        early_init_fdt_scan_reserved_mem();
         unflatten_and_copy_device_tree();
 
 #ifdef CONFIG_NUMA
@@ -390,8 +392,6 @@ static void __init arch_mem_init(char **cmdline_p)
         check_kernel_sections_mem();
 
-        early_init_fdt_scan_reserved_mem();
-
         /*
          * In order to reduce the possibility of kernel panic when failed to
          * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
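
The two hunks above move early_init_fdt_scan_reserved_mem() out of arch_mem_init() and into platform_init(), ahead of unflatten_and_copy_device_tree(). Below is a minimal sketch of the resulting order; the function names are the real kernel helpers from the diff, but the rationale in the comments is the editor's reading of the change, not text quoted from the commit:

#include <linux/init.h>
#include <linux/of_fdt.h>

/* Sketch only: the point is the ordering, assuming the usual
 * memblock-backed early allocator. */
static void __init fdt_order_sketch(void)
{
        /* First register /memreserve/ entries and /reserved-memory regions
         * from the flattened tree with memblock, so the early allocator can
         * no longer hand those ranges out. */
        early_init_fdt_scan_reserved_mem();

        /* Only then unflatten/copy the device tree: this step allocates
         * memory for the expanded tree and must not be able to land inside
         * a region the FDT declares as reserved. */
        unflatten_and_copy_device_tree();
}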
@@ -88,6 +88,73 @@ void show_ipi_list(struct seq_file *p, int prec)
         }
 }
 
+static inline void set_cpu_core_map(int cpu)
+{
+        int i;
+
+        cpumask_set_cpu(cpu, &cpu_core_setup_map);
+
+        for_each_cpu(i, &cpu_core_setup_map) {
+                if (cpu_data[cpu].package == cpu_data[i].package) {
+                        cpumask_set_cpu(i, &cpu_core_map[cpu]);
+                        cpumask_set_cpu(cpu, &cpu_core_map[i]);
+                }
+        }
+}
+
+static inline void set_cpu_sibling_map(int cpu)
+{
+        int i;
+
+        cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
+
+        for_each_cpu(i, &cpu_sibling_setup_map) {
+                if (cpus_are_siblings(cpu, i)) {
+                        cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+                        cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
+                }
+        }
+}
+
+static inline void clear_cpu_sibling_map(int cpu)
+{
+        int i;
+
+        for_each_cpu(i, &cpu_sibling_setup_map) {
+                if (cpus_are_siblings(cpu, i)) {
+                        cpumask_clear_cpu(i, &cpu_sibling_map[cpu]);
+                        cpumask_clear_cpu(cpu, &cpu_sibling_map[i]);
+                }
+        }
+
+        cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
+}
+
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+void calculate_cpu_foreign_map(void)
+{
+        int i, k, core_present;
+        cpumask_t temp_foreign_map;
+
+        /* Re-calculate the mask */
+        cpumask_clear(&temp_foreign_map);
+        for_each_online_cpu(i) {
+                core_present = 0;
+                for_each_cpu(k, &temp_foreign_map)
+                        if (cpus_are_siblings(i, k))
+                                core_present = 1;
+                if (!core_present)
+                        cpumask_set_cpu(i, &temp_foreign_map);
+        }
+
+        for_each_online_cpu(i)
+                cpumask_andnot(&cpu_foreign_map[i],
+                               &temp_foreign_map, &cpu_sibling_map[i]);
+}
+
 /* Send mailbox buffer via Mail_Send */
 static void csr_mail_send(uint64_t data, int cpu, int mailbox)
 {
@@ -303,6 +370,7 @@ int loongson_cpu_disable(void)
         numa_remove_cpu(cpu);
 #endif
         set_cpu_online(cpu, false);
+        clear_cpu_sibling_map(cpu);
         calculate_cpu_foreign_map();
         local_irq_save(flags);
         irq_migrate_all_off_this_cpu();
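
The clear_cpu_sibling_map() call added to loongson_cpu_disable() above is the teardown counterpart that set_cpu_sibling_map() previously lacked, so an offlined CPU used to stay in its siblings' masks. A small worked example (CPU numbers invented for illustration) of what the masks look like with and without the new call:

    Assume CPUs 0 and 1 are siblings in one core and CPU 2 sits alone in another.

    all online:                 cpu_sibling_map[0] = {0,1}   [1] = {0,1}   [2] = {2}
    CPU 1 offlined, old code:   cpu_sibling_map[0] = {0,1}   <- stale, still lists dead CPU 1
    CPU 1 offlined, new code:   cpu_sibling_map[0] = {0}     [1] = {}      [2] = {2}

calculate_cpu_foreign_map(), which the hunk above calls immediately afterwards, then works from consistent sibling masks rather than stale ones.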
@@ -337,6 +405,7 @@ void __noreturn arch_cpu_idle_dead(void)
                 addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
         } while (addr == 0);
 
+        local_irq_disable();
         init_fn = (void *)TO_CACHE(addr);
         iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
@@ -379,59 +448,6 @@ static int __init ipi_pm_init(void)
 core_initcall(ipi_pm_init);
 #endif
 
-static inline void set_cpu_sibling_map(int cpu)
-{
-        int i;
-
-        cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
-
-        for_each_cpu(i, &cpu_sibling_setup_map) {
-                if (cpus_are_siblings(cpu, i)) {
-                        cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
-                        cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
-                }
-        }
-}
-
-static inline void set_cpu_core_map(int cpu)
-{
-        int i;
-
-        cpumask_set_cpu(cpu, &cpu_core_setup_map);
-
-        for_each_cpu(i, &cpu_core_setup_map) {
-                if (cpu_data[cpu].package == cpu_data[i].package) {
-                        cpumask_set_cpu(i, &cpu_core_map[cpu]);
-                        cpumask_set_cpu(cpu, &cpu_core_map[i]);
-                }
-        }
-}
-
-/*
- * Calculate a new cpu_foreign_map mask whenever a
- * new cpu appears or disappears.
- */
-void calculate_cpu_foreign_map(void)
-{
-        int i, k, core_present;
-        cpumask_t temp_foreign_map;
-
-        /* Re-calculate the mask */
-        cpumask_clear(&temp_foreign_map);
-        for_each_online_cpu(i) {
-                core_present = 0;
-                for_each_cpu(k, &temp_foreign_map)
-                        if (cpus_are_siblings(i, k))
-                                core_present = 1;
-                if (!core_present)
-                        cpumask_set_cpu(i, &temp_foreign_map);
-        }
-
-        for_each_online_cpu(i)
-                cpumask_andnot(&cpu_foreign_map[i],
-                               &temp_foreign_map, &cpu_sibling_map[i]);
-}
-
 /* Preload SMP state for boot cpu */
 void smp_prepare_boot_cpu(void)
 {
@@ -298,74 +298,73 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
         return ret;
 }
 
-static int _kvm_get_cpucfg(int id, u64 *v)
+static int _kvm_get_cpucfg_mask(int id, u64 *v)
 {
-        int ret = 0;
-
-        if (id < 0 && id >= KVM_MAX_CPUCFG_REGS)
+        if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
                 return -EINVAL;
 
         switch (id) {
         case 2:
-                /* Return CPUCFG2 features which have been supported by KVM */
+                /* CPUCFG2 features unconditionally supported by KVM */
                 *v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
                      CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
                      CPUCFG2_LAM;
                 /*
-                 * If LSX is supported by CPU, it is also supported by KVM,
-                 * as we implement it.
+                 * For the ISA extensions listed below, if one is supported
+                 * by the host, then it is also supported by KVM.
                  */
                 if (cpu_has_lsx)
                         *v |= CPUCFG2_LSX;
-                /*
-                 * if LASX is supported by CPU, it is also supported by KVM,
-                 * as we implement it.
-                 */
                 if (cpu_has_lasx)
                         *v |= CPUCFG2_LASX;
-                break;
+
+                return 0;
         default:
-                ret = -EINVAL;
-                break;
+                /*
+                 * No restrictions on other valid CPUCFG IDs' values, but
+                 * CPUCFG data is limited to 32 bits as the LoongArch ISA
+                 * manual says (Volume 1, Section 2.2.10.5 "CPUCFG").
+                 */
+                *v = U32_MAX;
+
+                return 0;
         }
-
-        return ret;
 }
 
 static int kvm_check_cpucfg(int id, u64 val)
 {
-        u64 mask;
-        int ret = 0;
-
-        if (id < 0 && id >= KVM_MAX_CPUCFG_REGS)
-                return -EINVAL;
+        int ret;
+        u64 mask = 0;
 
-        if (_kvm_get_cpucfg(id, &mask))
+        ret = _kvm_get_cpucfg_mask(id, &mask);
+        if (ret)
                 return ret;
 
+        if (val & ~mask)
+                /* Unsupported features and/or the higher 32 bits should not be set */
+                return -EINVAL;
+
         switch (id) {
         case 2:
-                /* CPUCFG2 features checking */
-                if (val & ~mask)
-                        /* The unsupported features should not be set */
-                        ret = -EINVAL;
-                else if (!(val & CPUCFG2_LLFTP))
-                        /* The LLFTP must be set, as guest must has a constant timer */
-                        ret = -EINVAL;
-                else if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
-                        /* Single and double float point must both be set when enable FP */
-                        ret = -EINVAL;
-                else if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
-                        /* FP should be set when enable LSX */
-                        ret = -EINVAL;
-                else if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
-                        /* LSX, FP should be set when enable LASX, and FP has been checked before. */
-                        ret = -EINVAL;
-                break;
+                if (!(val & CPUCFG2_LLFTP))
+                        /* Guests must have a constant timer */
+                        return -EINVAL;
+
+                if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
+                        /* Single and double float point must both be set when FP is enabled */
+                        return -EINVAL;
+
+                if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
+                        /* LSX architecturally implies FP but val does not satisfy that */
+                        return -EINVAL;
+
+                if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
+                        /* LASX architecturally implies LSX and FP but val does not satisfy that */
+                        return -EINVAL;
+
+                return 0;
         default:
-                break;
+                /*
+                 * Values for the other CPUCFG IDs are not being further validated
+                 * besides the mask check above.
+                 */
+                return 0;
         }
-
-        return ret;
 }
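
Taken together, the rewritten pair first rejects out-of-range IDs and any value bits outside the mask reported by _kvm_get_cpucfg_mask() (which for IDs other than 2 is simply the low 32 bits), and only then applies the CPUCFG2 dependency rules. Below is a compact userspace-style restatement of those rules as a sketch; the SK_* bit positions are placeholders for illustration, not the architectural CPUCFG2_* values from <asm/loongarch.h>:

#include <stdbool.h>
#include <stdint.h>

/* Placeholder bits; the real definitions live in <asm/loongarch.h>. */
#define SK_FP    (1u << 0)
#define SK_FPSP  (1u << 1)
#define SK_FPDP  (1u << 2)
#define SK_LSX   (1u << 3)
#define SK_LASX  (1u << 4)
#define SK_LLFTP (1u << 5)

/* Mirrors the checks kvm_check_cpucfg() performs for id == 2. */
static bool cpucfg2_value_ok(uint64_t val, uint64_t mask)
{
        if (val & ~mask)                /* unsupported features or upper 32 bits set */
                return false;
        if (!(val & SK_LLFTP))          /* guest must have a constant timer */
                return false;
        if ((val & SK_FP) && (!(val & SK_FPSP) || !(val & SK_FPDP)))
                return false;           /* FP requires both FPSP and FPDP */
        if ((val & SK_LSX) && !(val & SK_FP))
                return false;           /* LSX implies FP */
        if ((val & SK_LASX) && !(val & SK_LSX))
                return false;           /* LASX implies LSX (and, transitively, FP) */
        return true;
}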
static int kvm_get_one_reg(struct kvm_vcpu *vcpu, static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
...@@ -566,7 +565,7 @@ static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu, ...@@ -566,7 +565,7 @@ static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
uint64_t val; uint64_t val;
uint64_t __user *uaddr = (uint64_t __user *)attr->addr; uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
ret = _kvm_get_cpucfg(attr->attr, &val); ret = _kvm_get_cpucfg_mask(attr->attr, &val);
if (ret) if (ret)
return ret; return ret;
......