Commit 22261fdf authored by Thomas Gleixner

Merge tag 'perf-urgent-for-mingo-5.1-20190329' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent

Pull perf/urgent fixes from Arnaldo:

Core libraries:
  Jiri Olsa:
  - Fix max perf_event_attr.precise_ip detection.

  Kan Liang:
  - Fix parser error for uncore event alias.

  Wei Lin:
  - Fixup ordering of kernel maps after obtaining the main kernel map address.

Intel PT:
  Adrian Hunter:
  - Fix TSC slip, where a TSC packet can slip past MTC packets so that the
    timestamp appears to go backwards.

  - Fixes for exported-sql-viewer GUI conversion to python3.

ARM coresight:
  Solomon Tan:
  - Fix the build by adding a missing case value for an enumeration value
    introduced in the newer OpenCSD library, which is now the required version.

tool headers:
  Arnaldo Carvalho de Melo:
  - Synchronize kernel headers with the kernel, getting the new io_uring and
    pidfd_send_signal syscalls so that 'perf trace' can handle them.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parents 1a9df9e2 e94d6b7f
@@ -596,6 +596,7 @@ config ARCH_DAVINCI
 	select HAVE_IDE
 	select PM_GENERIC_DOMAINS if PM
 	select PM_GENERIC_DOMAINS_OF if PM && OF
+	select REGMAP_MMIO
 	select RESET_CONTROLLER
 	select SPARSE_IRQ
 	select USE_OF
......
@@ -93,7 +93,7 @@ i2s_alt2: i2s_alt2 {
 };

 &hdmi {
-	hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+	hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
 };

 &pwm {
......
@@ -114,9 +114,9 @@ phy_port3: phy@2 {
 		reg = <2>;
 	};

-	switch@0 {
+	switch@10 {
 		compatible = "qca,qca8334";
-		reg = <0>;
+		reg = <10>;

 		switch_ports: ports {
 			#address-cells = <1>;
@@ -125,7 +125,7 @@ switch_ports: ports {
 			ethphy0: port@0 {
 				reg = <0>;
 				label = "cpu";
-				phy-mode = "rgmii";
+				phy-mode = "rgmii-id";
 				ethernet = <&fec>;

 				fixed-link {
......
@@ -264,7 +264,7 @@ &usdhc3 {
 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
 	vmcc-supply = <&reg_sd3_vmmc>;
 	cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
-	bus-witdh = <4>;
+	bus-width = <4>;
 	no-1-8-v;
 	status = "okay";
 };
@@ -275,7 +275,7 @@ &usdhc4 {
 	pinctrl-1 = <&pinctrl_usdhc4_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc4_200mhz>;
 	vmcc-supply = <&reg_sd4_vmmc>;
-	bus-witdh = <8>;
+	bus-width = <8>;
 	no-1-8-v;
 	non-removable;
 	status = "okay";
......
@@ -91,6 +91,7 @@ &fec {
 	pinctrl-0 = <&pinctrl_enet>;
 	phy-handle = <&ethphy>;
 	phy-mode = "rgmii";
+	phy-reset-duration = <10>; /* in msecs */
 	phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
 	phy-supply = <&vdd_eth_io_reg>;
 	status = "disabled";
......
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2016 Freescale Semiconductor, Inc.
  * Copyright (C) 2017 NXP
......
@@ -213,12 +213,13 @@ spi {
 	gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>;
 	gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>;
 	/*
-	 * It's not actually active high, but the frameworks assume
-	 * the polarity of the passed-in GPIO is "normal" (active
-	 * high) then actively drives the line low to select the
-	 * chip.
+	 * This chipselect is active high. Just setting the flags
+	 * to GPIO_ACTIVE_HIGH is not enough for the SPI DT bindings,
+	 * it will be ignored, only the special "spi-cs-high" flag
+	 * really counts.
 	 */
 	cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+	spi-cs-high;
 	num-chipselects = <1>;
 	/*
......
@@ -170,6 +170,9 @@ CONFIG_IMX_SDMA=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_IIO=y
 CONFIG_FSL_MX25_ADC=y
+CONFIG_PWM=y
+CONFIG_PWM_IMX1=y
+CONFIG_PWM_IMX27=y
 CONFIG_EXT4_FS=y
 # CONFIG_DNOTIFY is not set
 CONFIG_VFAT_FS=y
......
@@ -398,7 +398,7 @@ CONFIG_MAG3110=y
 CONFIG_MPL3115=y
 CONFIG_PWM=y
 CONFIG_PWM_FSL_FTM=y
-CONFIG_PWM_IMX=y
+CONFIG_PWM_IMX27=y
 CONFIG_NVMEM_IMX_OCOTP=y
 CONFIG_NVMEM_VF610_OCOTP=y
 CONFIG_TEE=y
......
@@ -16,30 +16,23 @@
 #include "cpuidle.h"
 #include "hardware.h"

-static atomic_t master = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(master_lock);
+static int num_idle_cpus = 0;
+static DEFINE_SPINLOCK(cpuidle_lock);

 static int imx6q_enter_wait(struct cpuidle_device *dev,
 			    struct cpuidle_driver *drv, int index)
 {
-	if (atomic_inc_return(&master) == num_online_cpus()) {
-		/*
-		 * With this lock, we prevent other cpu to exit and enter
-		 * this function again and become the master.
-		 */
-		if (!spin_trylock(&master_lock))
-			goto idle;
+	spin_lock(&cpuidle_lock);
+	if (++num_idle_cpus == num_online_cpus())
 		imx6_set_lpm(WAIT_UNCLOCKED);
-		cpu_do_idle();
-		imx6_set_lpm(WAIT_CLOCKED);
-		spin_unlock(&master_lock);
-		goto done;
-	}
+	spin_unlock(&cpuidle_lock);

-idle:
 	cpu_do_idle();
-done:
-	atomic_dec(&master);
+
+	spin_lock(&cpuidle_lock);
+	if (num_idle_cpus-- == num_online_cpus())
+		imx6_set_lpm(WAIT_CLOCKED);
+	spin_unlock(&cpuidle_lock);

 	return index;
 }
......
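The hunk above replaces a trylock-based "master CPU" scheme with a plain counter of idle CPUs: the last CPU to enter idle drops the SoC into the low-power WAIT_UNCLOCKED mode, and the first CPU to wake restores WAIT_CLOCKED. A minimal standalone sketch of the same last-CPU-into-idle pattern, assuming hypothetical lock()/unlock(), enter_low_power()/exit_low_power() and wait_for_interrupt() helpers standing in for the kernel primitives:

static int num_idle_cpus;
static int online_cpus = 4;		/* assumed fixed for this sketch */

static void idle(void)
{
	lock();				/* e.g. spin_lock(&cpuidle_lock) */
	if (++num_idle_cpus == online_cpus)
		enter_low_power();	/* last CPU in: drop the clocks */
	unlock();

	wait_for_interrupt();		/* e.g. cpu_do_idle() */

	lock();
	if (num_idle_cpus-- == online_cpus)
		exit_low_power();	/* first CPU out: restore clocks */
	unlock();
}

The pre-increment on entry and post-decrement on exit are deliberately paired so that exactly one CPU observes the "all CPUs idle" condition in each direction, under the same lock.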
@@ -59,6 +59,7 @@ static void __init imx51_m4if_setup(void)
 		return;

 	m4if_base = of_iomap(np, 0);
+	of_node_put(np);
 	if (!m4if_base) {
 		pr_err("Unable to map M4IF registers\n");
 		return;
......
@@ -27,6 +27,7 @@ config ARCH_BCM2835
 	bool "Broadcom BCM2835 family"
 	select TIMER_OF
 	select GPIOLIB
+	select MFD_CORE
 	select PINCTRL
 	select PINCTRL_BCM2835
 	select ARM_AMBA
......
@@ -321,7 +321,6 @@ sdmmc4: sdhci@3460000 {
 	nvidia,default-trim = <0x9>;
 	nvidia,dqs-trim = <63>;
 	mmc-hs400-1_8v;
-	supports-cqe;
 	status = "disabled";
 };
......
@@ -2,7 +2,7 @@
 /*
  * Device Tree Source for the RZ/G2E (R8A774C0) SoC
  *
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
  */

 #include <dt-bindings/clock/r8a774c0-cpg-mssr.h>
@@ -1150,9 +1150,8 @@ scif5: serial@e6f30000 {
 			 <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
 			 <&scif_clk>;
 	clock-names = "fck", "brg_int", "scif_clk";
-	dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
-	       <&dmac2 0x5b>, <&dmac2 0x5a>;
-	dma-names = "tx", "rx", "tx", "rx";
+	dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
+	dma-names = "tx", "rx";
 	power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
 	resets = <&cpg 202>;
 	status = "disabled";
......
@@ -2,7 +2,7 @@
 /*
  * Device Tree Source for the R-Car E3 (R8A77990) SoC
  *
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
  */

 #include <dt-bindings/clock/r8a77990-cpg-mssr.h>
@@ -1067,9 +1067,8 @@ scif5: serial@e6f30000 {
 			 <&cpg CPG_CORE R8A77990_CLK_S3D1C>,
 			 <&scif_clk>;
 	clock-names = "fck", "brg_int", "scif_clk";
-	dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
-	       <&dmac2 0x5b>, <&dmac2 0x5a>;
-	dma-names = "tx", "rx", "tx", "rx";
+	dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
+	dma-names = "tx", "rx";
 	power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
 	resets = <&cpg 202>;
 	status = "disabled";
......
@@ -360,4 +360,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
 	return reg1;
 }

+/*
+ * Interface to tell the AP bus code that a configuration
+ * change has happened. The bus code should at least do
+ * an ap bus resource rescan.
+ */
+#if IS_ENABLED(CONFIG_ZCRYPT)
+void ap_bus_cfg_chg(void);
+#else
+static inline void ap_bus_cfg_chg(void){};
+#endif
+
 #endif /* _ASM_S390_AP_H_ */
@@ -252,11 +252,14 @@ do { \
 /*
  * Cache aliasing on the latest machines calls for a mapping granularity
- * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
- * of up to 1GB. For 31-bit processes the virtual address space is limited,
- * use no alignment and limit the randomization to 8MB.
+ * of 512KB for the anonymous mapping base. For 64-bit processes use a
+ * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
+ * the virtual address space is limited, use no alignment and limit the
+ * randomization to 8MB.
+ * For the additional randomization of the program break use 32MB for
+ * 64-bit and 8MB for 31-bit.
  */
-#define BRK_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ffffUL)
+#define BRK_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x1fffUL)
 #define MMAP_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ff80UL)
 #define MMAP_ALIGN_MASK	(is_compat_task() ? 0 : 0x7fUL)
 #define STACK_RND_MASK	MMAP_RND_MASK
......
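These masks count 4 KB pages, so the randomization range is (mask + 1) pages. Working the numbers through (assuming the usual 4 KB page size these masks are applied to):

	old 64-bit BRK_RND_MASK: (0x3ffff + 1) * 4 KB = 0x40000000 bytes = 1 GB
	new 64-bit BRK_RND_MASK: (0x1fff  + 1) * 4 KB = 0x02000000 bytes = 32 MB
	31-bit BRK_RND_MASK:     (0x7ff   + 1) * 4 KB = 0x00800000 bytes = 8 MB

which matches the 32 MB / 8 MB figures now stated in the comment.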
@@ -91,52 +91,53 @@ struct lowcore {
 	__u64	hardirq_timer;			/* 0x02e8 */
 	__u64	softirq_timer;			/* 0x02f0 */
 	__u64	steal_timer;			/* 0x02f8 */
-	__u64	last_update_timer;		/* 0x0300 */
-	__u64	last_update_clock;		/* 0x0308 */
-	__u64	int_clock;			/* 0x0310 */
-	__u64	mcck_clock;			/* 0x0318 */
-	__u64	clock_comparator;		/* 0x0320 */
-	__u64	boot_clock[2];			/* 0x0328 */
+	__u64	avg_steal_timer;		/* 0x0300 */
+	__u64	last_update_timer;		/* 0x0308 */
+	__u64	last_update_clock;		/* 0x0310 */
+	__u64	int_clock;			/* 0x0318 */
+	__u64	mcck_clock;			/* 0x0320 */
+	__u64	clock_comparator;		/* 0x0328 */
+	__u64	boot_clock[2];			/* 0x0330 */

 	/* Current process. */
-	__u64	current_task;			/* 0x0338 */
-	__u64	kernel_stack;			/* 0x0340 */
+	__u64	current_task;			/* 0x0340 */
+	__u64	kernel_stack;			/* 0x0348 */

 	/* Interrupt, DAT-off and restart stack. */
-	__u64	async_stack;			/* 0x0348 */
-	__u64	nodat_stack;			/* 0x0350 */
-	__u64	restart_stack;			/* 0x0358 */
+	__u64	async_stack;			/* 0x0350 */
+	__u64	nodat_stack;			/* 0x0358 */
+	__u64	restart_stack;			/* 0x0360 */

 	/* Restart function and parameter. */
-	__u64	restart_fn;			/* 0x0360 */
-	__u64	restart_data;			/* 0x0368 */
-	__u64	restart_source;			/* 0x0370 */
+	__u64	restart_fn;			/* 0x0368 */
+	__u64	restart_data;			/* 0x0370 */
+	__u64	restart_source;			/* 0x0378 */

 	/* Address space pointer. */
-	__u64	kernel_asce;			/* 0x0378 */
-	__u64	user_asce;			/* 0x0380 */
-	__u64	vdso_asce;			/* 0x0388 */
+	__u64	kernel_asce;			/* 0x0380 */
+	__u64	user_asce;			/* 0x0388 */
+	__u64	vdso_asce;			/* 0x0390 */

 	/*
 	 * The lpp and current_pid fields form a
 	 * 64-bit value that is set as program
 	 * parameter with the LPP instruction.
 	 */
-	__u32	lpp;				/* 0x0390 */
-	__u32	current_pid;			/* 0x0394 */
+	__u32	lpp;				/* 0x0398 */
+	__u32	current_pid;			/* 0x039c */

 	/* SMP info area */
-	__u32	cpu_nr;				/* 0x0398 */
-	__u32	softirq_pending;		/* 0x039c */
-	__u32	preempt_count;			/* 0x03a0 */
-	__u32	spinlock_lockval;		/* 0x03a4 */
-	__u32	spinlock_index;			/* 0x03a8 */
-	__u32	fpu_flags;			/* 0x03ac */
-	__u64	percpu_offset;			/* 0x03b0 */
-	__u64	vdso_per_cpu_data;		/* 0x03b8 */
-	__u64	machine_flags;			/* 0x03c0 */
-	__u64	gmap;				/* 0x03c8 */
-	__u8	pad_0x03d0[0x0400-0x03d0];	/* 0x03d0 */
+	__u32	cpu_nr;				/* 0x03a0 */
+	__u32	softirq_pending;		/* 0x03a4 */
+	__u32	preempt_count;			/* 0x03a8 */
+	__u32	spinlock_lockval;		/* 0x03ac */
+	__u32	spinlock_index;			/* 0x03b0 */
+	__u32	fpu_flags;			/* 0x03b4 */
+	__u64	percpu_offset;			/* 0x03b8 */
+	__u64	vdso_per_cpu_data;		/* 0x03c0 */
+	__u64	machine_flags;			/* 0x03c8 */
+	__u64	gmap;				/* 0x03d0 */
+	__u8	pad_0x03d8[0x0400-0x03d8];	/* 0x03d8 */

 	/* br %r1 trampoline */
 	__u16	br_r1_trampoline;		/* 0x0400 */
......
@@ -196,23 +196,30 @@ static void cf_diag_perf_event_destroy(struct perf_event *event)
  */
 static int __hw_perf_event_init(struct perf_event *event)
 {
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 	struct perf_event_attr *attr = &event->attr;
+	struct cpu_cf_events *cpuhw;
 	enum cpumf_ctr_set i;
 	int err = 0;

-	debug_sprintf_event(cf_diag_dbg, 5,
-			    "%s event %p cpu %d authorized %#x\n", __func__,
-			    event, event->cpu, cpuhw->info.auth_ctl);
+	debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
+			    event, event->cpu);

 	event->hw.config = attr->config;
 	event->hw.config_base = 0;
-	local64_set(&event->count, 0);

-	/* Add all authorized counter sets to config_base */
+	/* Add all authorized counter sets to config_base. The
+	 * hardware init function is either called per-cpu or just once
+	 * for all CPUs (event->cpu == -1). This depends on whether
+	 * counting is started for all CPUs or on a per workload base where
+	 * the perf event moves from one CPU to another CPU.
+	 * Checking the authorization on any CPU is fine as the hardware
+	 * applies the same authorization settings to all CPUs.
+	 */
+	cpuhw = &get_cpu_var(cpu_cf_events);
 	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
 		if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
 			event->hw.config_base |= cpumf_ctr_ctl[i];
+	put_cpu_var(cpu_cf_events);

 	/* No authorized counter sets, nothing to count/sample */
 	if (!event->hw.config_base) {
......
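The key change above is that the per-CPU structure is now accessed under get_cpu_var()/put_cpu_var() instead of a bare this_cpu_ptr() read in a possibly preemptible context. A sketch of the pattern (not the full driver code):

	struct cpu_cf_events *cpuhw;

	/* get_cpu_var() disables preemption, so the task cannot migrate
	 * to another CPU while the per-CPU authorization data is read. */
	cpuhw = &get_cpu_var(cpu_cf_events);
	/* ... inspect cpuhw->info.auth_ctl ... */
	put_cpu_var(cpu_cf_events);	/* re-enables preemption */

As the comment in the hunk notes, any CPU's view of the authorization bits is acceptable here because the hardware applies the same settings to all CPUs.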
@@ -266,7 +266,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 	lc->percpu_offset = __per_cpu_offset[cpu];
 	lc->kernel_asce = S390_lowcore.kernel_asce;
 	lc->machine_flags = S390_lowcore.machine_flags;
-	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
+	lc->user_timer = lc->system_timer =
+		lc->steal_timer = lc->avg_steal_timer = 0;
 	__ctl_store(lc->cregs_save_area, 0, 15);
 	save_access_regs((unsigned int *) lc->access_regs_save_area);
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
......
@@ -124,7 +124,7 @@ static void account_system_index_scaled(struct task_struct *p, u64 cputime,
  */
 static int do_account_vtime(struct task_struct *tsk)
 {
-	u64 timer, clock, user, guest, system, hardirq, softirq, steal;
+	u64 timer, clock, user, guest, system, hardirq, softirq;

 	timer = S390_lowcore.last_update_timer;
 	clock = S390_lowcore.last_update_clock;
@@ -182,12 +182,6 @@ static int do_account_vtime(struct task_struct *tsk)
 	if (softirq)
 		account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);

-	steal = S390_lowcore.steal_timer;
-	if ((s64) steal > 0) {
-		S390_lowcore.steal_timer = 0;
-		account_steal_time(cputime_to_nsecs(steal));
-	}
-
 	return virt_timer_forward(user + guest + system + hardirq + softirq);
 }

@@ -213,8 +207,19 @@ void vtime_task_switch(struct task_struct *prev)
  */
 void vtime_flush(struct task_struct *tsk)
 {
+	u64 steal, avg_steal;
+
 	if (do_account_vtime(tsk))
 		virt_timer_expire();
+
+	steal = S390_lowcore.steal_timer;
+	avg_steal = S390_lowcore.avg_steal_timer / 2;
+	if ((s64) steal > 0) {
+		S390_lowcore.steal_timer = 0;
+		account_steal_time(steal);
+		avg_steal += steal;
+	}
+	S390_lowcore.avg_steal_timer = avg_steal;
 }

 /*
......
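The new avg_steal_timer is an exponentially decaying average: on every flush it is halved and the freshly observed steal time is added, i.e. avg_{n+1} = avg_n / 2 + steal_n. Under a steady steal rate s per interval the average converges to 2s (solving A = A/2 + s gives A = 2s), while a one-off spike decays geometrically, which is what makes it usable as a smoothed steal indicator.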
@@ -24,6 +24,7 @@
 #include <asm/crw.h>
 #include <asm/isc.h>
 #include <asm/ebcdic.h>
+#include <asm/ap.h>

 #include "css.h"
 #include "cio.h"
@@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
 			   " failed (rc=%d).\n", ret);
 }

+static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
+{
+	CIO_CRW_EVENT(3, "chsc: ap config changed\n");
+	if (sei_area->rs != 5)
+		return;
+
+	ap_bus_cfg_chg();
+}
+
 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
 {
 	switch (sei_area->cc) {
@@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
 	case 2: /* i/o resource accessibility */
 		chsc_process_sei_res_acc(sei_area);
 		break;
+	case 3: /* ap config changed */
+		chsc_process_sei_ap_cfg_chg(sei_area);
+		break;
 	case 7: /* channel-path-availability information */
 		chsc_process_sei_chp_avail(sei_area);
 		break;
......
@@ -72,12 +72,16 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
 {
 	struct vfio_ccw_private *private;
 	struct irb *irb;
+	bool is_final;

 	private = container_of(work, struct vfio_ccw_private, io_work);
 	irb = &private->irb;

+	is_final = !(scsw_actl(&irb->scsw) &
+		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
 	if (scsw_is_solicited(&irb->scsw)) {
 		cp_update_scsw(&private->cp, &irb->scsw);
-		cp_free(&private->cp);
+		if (is_final)
+			cp_free(&private->cp);
 	}
 	memcpy(private->io_region->irb_area, irb, sizeof(*irb));
@@ -85,7 +89,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
 	if (private->io_trigger)
 		eventfd_signal(private->io_trigger, 1);

-	if (private->mdev)
+	if (private->mdev && is_final)
 		private->state = VFIO_CCW_STATE_IDLE;
 }
......
@@ -810,11 +810,18 @@ static int ap_device_remove(struct device *dev)
 	struct ap_device *ap_dev = to_ap_dev(dev);
 	struct ap_driver *ap_drv = ap_dev->drv;

+	/* prepare ap queue device removal */
 	if (is_queue_dev(dev))
-		ap_queue_remove(to_ap_queue(dev));
+		ap_queue_prepare_remove(to_ap_queue(dev));
+
+	/* driver's chance to clean up gracefully */
 	if (ap_drv->remove)
 		ap_drv->remove(ap_dev);

+	/* now do the ap queue device remove */
+	if (is_queue_dev(dev))
+		ap_queue_remove(to_ap_queue(dev));
+
 	/* Remove queue/card from list of active queues/cards */
 	spin_lock_bh(&ap_list_lock);
 	if (is_card_dev(dev))
@@ -860,6 +867,16 @@ void ap_bus_force_rescan(void)
 }
 EXPORT_SYMBOL(ap_bus_force_rescan);

+/*
+ * A config change has happened, force an ap bus rescan.
+ */
+void ap_bus_cfg_chg(void)
+{
+	AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
+
+	ap_bus_force_rescan();
+}
+
 /*
  * hex2bitmap() - parse hex mask string and set bitmap.
  * Valid strings are "0x012345678" with at least one valid hex number.
......
@@ -91,6 +91,7 @@ enum ap_state {
 	AP_STATE_WORKING,
 	AP_STATE_QUEUE_FULL,
 	AP_STATE_SUSPEND_WAIT,
+	AP_STATE_REMOVE,	/* about to be removed from driver */
 	AP_STATE_UNBOUND,	/* momentary not bound to a driver */
 	AP_STATE_BORKED,	/* broken */
 	NR_AP_STATES
@@ -252,6 +253,7 @@ void ap_bus_force_rescan(void);

 void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+void ap_queue_prepare_remove(struct ap_queue *aq);
 void ap_queue_remove(struct ap_queue *aq);
 void ap_queue_suspend(struct ap_device *ap_dev);
 void ap_queue_resume(struct ap_device *ap_dev);
......
@@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
 		[AP_EVENT_POLL] = ap_sm_suspend_read,
 		[AP_EVENT_TIMEOUT] = ap_sm_nop,
 	},
+	[AP_STATE_REMOVE] = {
+		[AP_EVENT_POLL] = ap_sm_nop,
+		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	},
 	[AP_STATE_UNBOUND] = {
 		[AP_EVENT_POLL] = ap_sm_nop,
 		[AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -740,18 +744,31 @@ void ap_flush_queue(struct ap_queue *aq)
 }
 EXPORT_SYMBOL(ap_flush_queue);

-void ap_queue_remove(struct ap_queue *aq)
+void ap_queue_prepare_remove(struct ap_queue *aq)
 {
-	ap_flush_queue(aq);
+	spin_lock_bh(&aq->lock);
+	/* flush queue */
+	__ap_flush_queue(aq);
+	/* set REMOVE state to prevent new messages are queued in */
+	aq->state = AP_STATE_REMOVE;
 	del_timer_sync(&aq->timeout);
+	spin_unlock_bh(&aq->lock);
+}

-	/* reset with zero, also clears irq registration */
+void ap_queue_remove(struct ap_queue *aq)
+{
+	/*
+	 * all messages have been flushed and the state is
+	 * AP_STATE_REMOVE. Now reset with zero which also
+	 * clears the irq registration and move the state
+	 * to AP_STATE_UNBOUND to signal that this queue
+	 * is not used by any driver currently.
+	 */
 	spin_lock_bh(&aq->lock);
 	ap_zapq(aq->qid);
 	aq->state = AP_STATE_UNBOUND;
 	spin_unlock_bh(&aq->lock);
 }
-EXPORT_SYMBOL(ap_queue_remove);

 void ap_queue_reinit_state(struct ap_queue *aq)
 {
@@ -760,4 +777,3 @@ void ap_queue_reinit_state(struct ap_queue *aq)
 	ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
 	spin_unlock_bh(&aq->lock);
 }
-EXPORT_SYMBOL(ap_queue_reinit_state);
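The split above turns queue removal into a two-stage teardown: stop and flush the queue while the driver is still bound, give the driver its remove callback, and only then reset the hardware. Schematically, with hypothetical helper names rather than the real ap bus API:

static int device_remove(struct device *dev)
{
	/* stage 1: flush pending work, block new messages (REMOVE state) */
	prepare_remove(dev);
	/* stage 2: the driver may still talk to the quiesced queue here */
	if (drv->remove)
		drv->remove(dev);
	/* stage 3: final hardware reset, queue becomes UNBOUND */
	final_remove(dev);
	return 0;
}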
@@ -586,6 +586,7 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
 						     struct zcrypt_queue *zq,
+						     struct module **pmod,
 						     unsigned int weight)
 {
 	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
@@ -595,15 +596,15 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
 	atomic_add(weight, &zc->load);
 	atomic_add(weight, &zq->load);
 	zq->request_count++;
+	*pmod = zq->queue->ap_dev.drv->driver.owner;
 	return zq;
 }

 static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
 				     struct zcrypt_queue *zq,
+				     struct module *mod,
 				     unsigned int weight)
 {
-	struct module *mod = zq->queue->ap_dev.drv->driver.owner;
-
 	zq->request_count--;
 	atomic_sub(weight, &zc->load);
 	atomic_sub(weight, &zq->load);
@@ -653,6 +654,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
 	unsigned int weight, pref_weight;
 	unsigned int func_code;
 	int qid = 0, rc = -ENODEV;
+	struct module *mod;

 	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
@@ -706,7 +708,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
 			pref_weight = weight;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
 	spin_unlock(&zcrypt_list_lock);

 	if (!pref_zq) {
@@ -718,7 +720,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
 	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);

 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
 	spin_unlock(&zcrypt_list_lock);

 out:
@@ -735,6 +737,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
 	unsigned int weight, pref_weight;
 	unsigned int func_code;
 	int qid = 0, rc = -ENODEV;
+	struct module *mod;

 	trace_s390_zcrypt_req(crt, TP_ICARSACRT);
@@ -788,7 +791,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
 			pref_weight = weight;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
 	spin_unlock(&zcrypt_list_lock);

 	if (!pref_zq) {
@@ -800,7 +803,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
 	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);

 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
 	spin_unlock(&zcrypt_list_lock);

 out:
@@ -819,6 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
 	unsigned int func_code;
 	unsigned short *domain;
 	int qid = 0, rc = -ENODEV;
+	struct module *mod;

 	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
@@ -865,7 +869,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
 			pref_weight = weight;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
 	spin_unlock(&zcrypt_list_lock);

 	if (!pref_zq) {
@@ -881,7 +885,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
 	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);

 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
 	spin_unlock(&zcrypt_list_lock);

 out:
@@ -932,6 +936,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
 	unsigned int func_code;
 	struct ap_message ap_msg;
 	int qid = 0, rc = -ENODEV;
+	struct module *mod;

 	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
@@ -1000,7 +1005,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
 			pref_weight = weight;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
 	spin_unlock(&zcrypt_list_lock);

 	if (!pref_zq) {
@@ -1012,7 +1017,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
 	rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);

 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
 	spin_unlock(&zcrypt_list_lock);

 out_free:
@@ -1033,6 +1038,7 @@ static long zcrypt_rng(char *buffer)
 	struct ap_message ap_msg;
 	unsigned int domain;
 	int qid = 0, rc = -ENODEV;
+	struct module *mod;

 	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
@@ -1064,7 +1070,7 @@ static long zcrypt_rng(char *buffer)
 			pref_weight = weight;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
 	spin_unlock(&zcrypt_list_lock);

 	if (!pref_zq) {
@@ -1076,7 +1082,7 @@ static long zcrypt_rng(char *buffer)
 	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
 	spin_unlock(&zcrypt_list_lock);

 out:
......
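The point of routing a struct module pointer out of zcrypt_pick_queue() and back into zcrypt_drop_queue() is lifetime, not convenience: the owner pointer is captured while the try_module_get() reference is known to pin it, because by the time the request completes the queue's driver may have been unbound, and re-reading zq->queue->ap_dev.drv at drop time could chase a stale pointer. The underlying pattern, sketched:

	struct module *mod;

	if (!try_module_get(drv->owner))	/* pin the driver module */
		return -ENODEV;
	mod = drv->owner;			/* capture while provably valid */
	/* ... submit the request; the device may be unbound meanwhile ... */
	module_put(mod);			/* drop via the captured pointer */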
@@ -150,7 +150,12 @@ struct bcm2835_power {

 static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
 {
-	u64 start = ktime_get_ns();
+	u64 start;
+
+	if (!reg)
+		return 0;
+
+	start = ktime_get_ns();

 	/* Enable the module's async AXI bridges. */
 	ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
@@ -165,7 +170,12 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)

 static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
 {
-	u64 start = ktime_get_ns();
+	u64 start;
+
+	if (!reg)
+		return 0;
+
+	start = ktime_get_ns();

 	/* Enable the module's async AXI bridges. */
 	ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
@@ -475,7 +485,7 @@ static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
 	}
 }

-static void
+static int
 bcm2835_init_power_domain(struct bcm2835_power *power,
 			  int pd_xlate_index, const char *name)
 {
@@ -483,6 +493,17 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
 	struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];

 	dom->clk = devm_clk_get(dev->parent, name);
+	if (IS_ERR(dom->clk)) {
+		int ret = PTR_ERR(dom->clk);
+
+		if (ret == -EPROBE_DEFER)
+			return ret;
+
+		/* Some domains don't have a clk, so make sure that we
+		 * don't deref an error pointer later.
+		 */
+		dom->clk = NULL;
+	}

 	dom->base.name = name;
 	dom->base.power_on = bcm2835_power_pd_power_on;
@@ -495,6 +516,8 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
 	pm_genpd_init(&dom->base, NULL, true);

 	power->pd_xlate.domains[pd_xlate_index] = &dom->base;
+
+	return 0;
 }

 /** bcm2835_reset_reset - Resets a block that has a reset line in the
@@ -592,7 +615,7 @@ static int bcm2835_power_probe(struct platform_device *pdev)
 		{ BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
 		{ BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
 	};
-	int ret, i;
+	int ret = 0, i;
 	u32 id;

 	power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
@@ -619,8 +642,11 @@ static int bcm2835_power_probe(struct platform_device *pdev)

 	power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);

-	for (i = 0; i < ARRAY_SIZE(power_domain_names); i++)
-		bcm2835_init_power_domain(power, i, power_domain_names[i]);
+	for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+		ret = bcm2835_init_power_domain(power, i, power_domain_names[i]);
+		if (ret)
+			goto fail;
+	}

 	for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
 		pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
@@ -634,12 +660,21 @@ static int bcm2835_power_probe(struct platform_device *pdev)

 	ret = devm_reset_controller_register(dev, &power->reset);
 	if (ret)
-		return ret;
+		goto fail;

 	of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);

 	dev_info(dev, "Broadcom BCM2835 power domains driver");
 	return 0;
+
+fail:
+	for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+		struct generic_pm_domain *dom = &power->domains[i].base;
+
+		if (dom->name)
+			pm_genpd_remove(dom);
+	}
+
+	return ret;
 }

 static int bcm2835_power_remove(struct platform_device *pdev)
......
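The devm_clk_get() handling added above is the common optional-clock idiom: -EPROBE_DEFER must be propagated so the probe is retried once the clock provider appears, while any other error is taken to mean "this power domain simply has no clock". A condensed sketch of just that branch:

	dom->clk = devm_clk_get(dev->parent, name);
	if (IS_ERR(dom->clk)) {
		if (PTR_ERR(dom->clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* provider not ready, retry */
		dom->clk = NULL;	/* no clock: the clk API treats NULL as a no-op */
	}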
@@ -1515,8 +1515,8 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)

 	xdr_encode_AFS_StoreStatus(&bp, attr);

-	*bp++ = 0;				/* position of start of write */
-	*bp++ = 0;
+	*bp++ = htonl(attr->ia_size >> 32);	/* position of start of write */
+	*bp++ = htonl((u32) attr->ia_size);
 	*bp++ = 0;				/* size of write */
 	*bp++ = 0;
 	*bp++ = htonl(attr->ia_size >> 32);	/* new file length */
@@ -1564,7 +1564,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)

 	xdr_encode_AFS_StoreStatus(&bp, attr);

-	*bp++ = 0;				/* position of start of write */
+	*bp++ = htonl(attr->ia_size);		/* position of start of write */
 	*bp++ = 0;				/* size of write */
 	*bp++ = htonl(attr->ia_size);		/* new file length */
......
@@ -1514,7 +1514,7 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
 	bp = xdr_encode_u32(bp, 0);		/* RPC flags */
 	bp = xdr_encode_YFSFid(bp, &vnode->fid);
 	bp = xdr_encode_YFS_StoreStatus(bp, attr);
-	bp = xdr_encode_u64(bp, 0);		/* position of start of write */
+	bp = xdr_encode_u64(bp, attr->ia_size);	/* position of start of write */
 	bp = xdr_encode_u64(bp, 0);		/* size of write */
 	bp = xdr_encode_u64(bp, attr->ia_size);	/* new file length */
 	yfs_check_req(call, bp);
......
@@ -27,8 +27,6 @@
 #define MAP_NONBLOCK	0x40000
 #define MAP_NORESERVE	0x10000
 #define MAP_POPULATE	0x20000
-#define MAP_PRIVATE	0x02
-#define MAP_SHARED	0x01
 #define MAP_STACK	0x80000
 #define PROT_EXEC	0x4
 #define PROT_GROWSDOWN	0x01000000
......
@@ -28,8 +28,6 @@
 #define MAP_NONBLOCK	0x20000
 #define MAP_NORESERVE	0x0400
 #define MAP_POPULATE	0x10000
-#define MAP_PRIVATE	0x002
-#define MAP_SHARED	0x001
 #define MAP_STACK	0x40000
 #define PROT_EXEC	0x04
 #define PROT_GROWSDOWN	0x01000000
......
@@ -27,8 +27,6 @@
 #define MAP_NONBLOCK	0x20000
 #define MAP_NORESERVE	0x4000
 #define MAP_POPULATE	0x10000
-#define MAP_PRIVATE	0x02
-#define MAP_SHARED	0x01
 #define MAP_STACK	0x40000
 #define PROT_EXEC	0x4
 #define PROT_GROWSDOWN	0x01000000
......
@@ -463,10 +463,12 @@ struct kvm_ppc_cpu_char {
 #define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED	(1ULL << 58)
 #define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF	(1ULL << 57)
 #define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS	(1ULL << 56)
+#define KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST	(1ull << 54)

 #define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY	(1ULL << 63)
 #define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR		(1ULL << 62)
 #define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR	(1ULL << 61)
+#define KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE	(1ull << 58)

 /* Per-vcpu XICS interrupt controller state */
 #define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
......
@@ -344,6 +344,7 @@
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
 #define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_TSX_FORCE_ABORT	(18*32+13) /* "" TSX_FORCE_ABORT */
 #define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
......
@@ -27,8 +27,6 @@
 #define MAP_NONBLOCK	0x20000
 #define MAP_NORESERVE	0x0400
 #define MAP_POPULATE	0x10000
-#define MAP_PRIVATE	0x002
-#define MAP_SHARED	0x001
 #define MAP_STACK	0x40000
 #define PROT_EXEC	0x4
 #define PROT_GROWSDOWN	0x01000000
......
@@ -4,9 +4,9 @@
 /*
  * Check OpenCSD library version is sufficient to provide required features
  */
-#define OCSD_MIN_VER ((0 << 16) | (10 << 8) | (0))
+#define OCSD_MIN_VER ((0 << 16) | (11 << 8) | (0))
 #if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
-#error "OpenCSD >= 0.10.0 is required"
+#error "OpenCSD >= 0.11.0 is required"
 #endif

 int main(void)
......
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
#define __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
#include <asm-generic/mman-common.h>
/* We need this because we need to have tools/include/uapi/ included in the tools
* header search path to get access to stuff that is not yet in the system's
* copy of the files in that directory, but since this cset:
*
* 746c9398f5ac ("arch: move common mmap flags to linux/mman.h")
*
* We end up making sys/mman.h, that is in the system headers, to not find the
* MAP_SHARED and MAP_PRIVATE defines because they are not anymore in our copy
* of asm-generic/mman-common.h. So we define them here and include this header
* from each of the per arch mman.h headers.
*/
#ifndef MAP_SHARED
#define MAP_SHARED 0x01 /* Share changes */
#define MAP_PRIVATE 0x02 /* Changes are private */
#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
#endif
#endif // __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
@@ -15,9 +15,7 @@
 #define PROT_GROWSDOWN	0x01000000	/* mprotect flag: extend change to start of growsdown vma */
 #define PROT_GROWSUP	0x02000000	/* mprotect flag: extend change to end of growsup vma */

-#define MAP_SHARED	0x01		/* Share changes */
-#define MAP_PRIVATE	0x02		/* Changes are private */
-#define MAP_SHARED_VALIDATE 0x03	/* share + validate extension flags */
+/* 0x01 - 0x03 are defined in linux/mman.h */
 #define MAP_TYPE	0x0f		/* Mask for type of mapping */
 #define MAP_FIXED	0x10		/* Interpret addr exactly */
 #define MAP_ANONYMOUS	0x20		/* don't use a file */
......
@@ -2,7 +2,7 @@
 #ifndef __ASM_GENERIC_MMAN_H
 #define __ASM_GENERIC_MMAN_H

-#include <asm-generic/mman-common.h>
+#include <asm-generic/mman-common-tools.h>

 #define MAP_GROWSDOWN	0x0100		/* stack-like segment */
 #define MAP_DENYWRITE	0x0800		/* ETXTBSY */
......
@@ -824,8 +824,17 @@ __SYSCALL(__NR_futex_time64, sys_futex)
 __SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
 #endif

+#define __NR_pidfd_send_signal 424
+__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
+#define __NR_io_uring_setup 425
+__SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
+#define __NR_io_uring_enter 426
+__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
+#define __NR_io_uring_register 427
+__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
+
 #undef __NR_syscalls
-#define __NR_syscalls 424
+#define __NR_syscalls 428

 /*
  * 32 bit systems traditionally used different
......
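With the numbers above wired up, the new calls can be exercised through syscall(2) even before libc grows wrappers. A minimal sketch for pidfd_send_signal (in Linux 5.1 a pidfd is obtained by opening a /proc/<pid> directory; the pid 1234 below is purely hypothetical):

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_pidfd_send_signal
#define __NR_pidfd_send_signal 424
#endif

int main(void)
{
	/* Hypothetical target process; a real caller would build the path. */
	int pidfd = open("/proc/1234", O_DIRECTORY | O_RDONLY);

	if (pidfd < 0) {
		perror("open");
		return 1;
	}
	/* The siginfo argument may be NULL; flags must currently be 0. */
	if (syscall(__NR_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0)
		perror("pidfd_send_signal");
	close(pidfd);
	return 0;
}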
@@ -1486,9 +1486,73 @@ struct drm_i915_gem_context_param {
 #define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
 #define   I915_CONTEXT_DEFAULT_PRIORITY		0
 #define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
+/*
+ * When using the following param, value should be a pointer to
+ * drm_i915_gem_context_param_sseu.
+ */
+#define I915_CONTEXT_PARAM_SSEU		0x7
 	__u64 value;
 };

+/**
+ * Context SSEU programming
+ *
+ * It may be necessary for either functional or performance reason to configure
+ * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
+ * Sub-slice/EU).
+ *
+ * This is done by configuring SSEU configuration using the below
+ * @struct drm_i915_gem_context_param_sseu for every supported engine which
+ * userspace intends to use.
+ *
+ * Not all GPUs or engines support this functionality in which case an error
+ * code -ENODEV will be returned.
+ *
+ * Also, flexibility of possible SSEU configuration permutations varies between
+ * GPU generations and software imposed limitations. Requesting such a
+ * combination will return an error code of -EINVAL.
+ *
+ * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
+ * favour of a single global setting.
+ */
+struct drm_i915_gem_context_param_sseu {
+	/*
+	 * Engine class & instance to be configured or queried.
+	 */
+	__u16 engine_class;
+	__u16 engine_instance;
+
+	/*
+	 * Unused for now. Must be cleared to zero.
+	 */
+	__u32 flags;
+
+	/*
+	 * Mask of slices to enable for the context. Valid values are a subset
+	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
+	 */
+	__u64 slice_mask;
+
+	/*
+	 * Mask of subslices to enable for the context. Valid values are a
+	 * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
+	 */
+	__u64 subslice_mask;
+
+	/*
+	 * Minimum/Maximum number of EUs to enable per subslice for the
+	 * context. min_eus_per_subslice must be inferior or equal to
+	 * max_eus_per_subslice.
+	 */
+	__u16 min_eus_per_subslice;
+	__u16 max_eus_per_subslice;
+
+	/*
+	 * Unused for now. Must be cleared to zero.
+	 */
+	__u32 rsvd;
+};
+
 enum drm_i915_oa_format {
 	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
 	I915_OA_FORMAT_A29,	    /* HSW only */
......
@@ -41,6 +41,7 @@
 #define F_SEAL_SHRINK	0x0002	/* prevent file from shrinking */
 #define F_SEAL_GROW	0x0004	/* prevent file from growing */
 #define F_SEAL_WRITE	0x0008	/* prevent writes */
+#define F_SEAL_FUTURE_WRITE	0x0010  /* prevent future writes while mapped */
 /* (1U << 31) is reserved for signed error codes */

 /*
......
@@ -12,6 +12,10 @@
 #define OVERCOMMIT_ALWAYS		1
 #define OVERCOMMIT_NEVER		2

+#define MAP_SHARED	0x01		/* Share changes */
+#define MAP_PRIVATE	0x02		/* Changes are private */
+#define MAP_SHARED_VALIDATE 0x03	/* share + validate extension flags */
+
 /*
  * Huge page size encoding when MAP_HUGETLB is specified, and a huge page
  * size other than the default is desired. See hugetlb_encode.h.
......
@@ -481,8 +481,8 @@ $(madvise_behavior_array): $(madvise_hdr_dir)/mman-common.h $(madvise_behavior_t

 mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c
 mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh
-$(mmap_flags_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl)
-	$(Q)$(SHELL) '$(mmap_flags_tbl)' $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
+$(mmap_flags_array): $(linux_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl)
+	$(Q)$(SHELL) '$(mmap_flags_tbl)' $(linux_uapi_dir) $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@

 mount_flags_array := $(beauty_outdir)/mount_flags_array.c
 mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh
......
@@ -345,6 +345,10 @@
 334	common	rseq			__x64_sys_rseq
 # don't use numbers 387 through 423, add new calls after the last
 # 'common' entry
+424	common	pidfd_send_signal	__x64_sys_pidfd_send_signal
+425	common	io_uring_setup		__x64_sys_io_uring_setup
+426	common	io_uring_enter		__x64_sys_io_uring_enter
+427	common	io_uring_register	__x64_sys_io_uring_register

 #
 # x32-specific system call numbers start at 512 to avoid cache impact
......
@@ -103,7 +103,7 @@ done
 # diff with extra ignore lines
 check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
 check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
-check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common.h>"'
+check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"'
 check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"'

 # diff non-symmetric files
......
...@@ -107,6 +107,7 @@ import os ...@@ -107,6 +107,7 @@ import os
from PySide.QtCore import * from PySide.QtCore import *
from PySide.QtGui import * from PySide.QtGui import *
from PySide.QtSql import * from PySide.QtSql import *
pyside_version_1 = True
from decimal import * from decimal import *
from ctypes import * from ctypes import *
from multiprocessing import Process, Array, Value, Event from multiprocessing import Process, Array, Value, Event
...@@ -1526,6 +1527,19 @@ def BranchDataPrep(query): ...@@ -1526,6 +1527,19 @@ def BranchDataPrep(query):
" (" + dsoname(query.value(15)) + ")") " (" + dsoname(query.value(15)) + ")")
return data return data
def BranchDataPrepWA(query):
data = []
data.append(query.value(0))
# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
data.append("{:>19}".format(query.value(1)))
for i in xrange(2, 8):
data.append(query.value(i))
data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
" (" + dsoname(query.value(11)) + ")" + " -> " +
tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
" (" + dsoname(query.value(15)) + ")")
return data
 # Branch data model
 
 class BranchModel(TreeModel):
@@ -1553,7 +1567,11 @@ class BranchModel(TreeModel):
 			" AND evsel_id = " + str(self.event_id) +
 			" ORDER BY samples.id"
 			" LIMIT " + str(glb_chunk_sz))
-		self.fetcher = SQLFetcher(glb, sql, BranchDataPrep, self.AddSample)
+		if pyside_version_1 and sys.version_info[0] == 3:
+			prep = BranchDataPrepWA
+		else:
+			prep = BranchDataPrep
+		self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
 		self.fetcher.done.connect(self.Update)
 		self.fetcher.Fetch(glb_chunk_sz)
@@ -2079,14 +2097,6 @@ def IsSelectable(db, table, sql = ""):
 		return False
 	return True
 
-# SQL data preparation
-
-def SQLTableDataPrep(query, count):
-	data = []
-	for i in xrange(count):
-		data.append(query.value(i))
-	return data
-
 # SQL table data model item
 
 class SQLTableItem():
@@ -2110,7 +2120,7 @@ class SQLTableModel(TableModel):
 		self.more = True
 		self.populated = 0
 		self.column_headers = column_headers
-		self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): SQLTableDataPrep(x, y), self.AddSample)
+		self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
 		self.fetcher.done.connect(self.Update)
 		self.fetcher.Fetch(glb_chunk_sz)
@@ -2154,6 +2164,12 @@ class SQLTableModel(TableModel):
 	def columnHeader(self, column):
 		return self.column_headers[column]
 
+	def SQLTableDataPrep(self, query, count):
+		data = []
+		for i in xrange(count):
+			data.append(query.value(i))
+		return data
+
 # SQL automatic table data model
 
 class SQLAutoTableModel(SQLTableModel):
@@ -2182,8 +2198,32 @@ class SQLAutoTableModel(SQLTableModel):
 			QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
 			while query.next():
 				column_headers.append(query.value(0))
+		if pyside_version_1 and sys.version_info[0] == 3:
+			if table_name == "samples_view":
+				self.SQLTableDataPrep = self.samples_view_DataPrep
+			if table_name == "samples":
+				self.SQLTableDataPrep = self.samples_DataPrep
 		super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
 
+	def samples_view_DataPrep(self, query, count):
+		data = []
+		data.append(query.value(0))
+		# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+		data.append("{:>19}".format(query.value(1)))
+		for i in xrange(2, count):
+			data.append(query.value(i))
+		return data
+
+	def samples_DataPrep(self, query, count):
+		data = []
+		for i in xrange(9):
+			data.append(query.value(i))
+		# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
+		data.append("{:>19}".format(query.value(9)))
+		for i in xrange(10, count):
+			data.append(query.value(i))
+		return data
+
 # Base class for custom ResizeColumnsToContents
 
 class ResizeColumnsToContentsBase(QObject):
@@ -2868,9 +2908,13 @@ class LibXED():
 		ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
 		if not ok:
 			return 0, ""
+		if sys.version_info[0] == 2:
+			result = inst.buffer.value
+		else:
+			result = inst.buffer.value.decode()
 		# Return instruction length and the disassembled instruction text
 		# For now, assume the length is in byte 166
-		return inst.xedd[166], inst.buffer.value
+		return inst.xedd[166], result
 def TryOpen(file_name):
 	try:
@@ -2886,9 +2930,14 @@ def Is64Bit(f):
 	header = f.read(7)
 	f.seek(pos)
 	magic = header[0:4]
-	eclass = ord(header[4])
-	encoding = ord(header[5])
-	version = ord(header[6])
+	if sys.version_info[0] == 2:
+		eclass = ord(header[4])
+		encoding = ord(header[5])
+		version = ord(header[6])
+	else:
+		eclass = header[4]
+		encoding = header[5]
+		version = header[6]
 	if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1:
 		result = True if eclass == 2 else False
 	return result
...
 #!/bin/sh
 # SPDX-License-Identifier: LGPL-2.1
 
-if [ $# -ne 2 ] ; then
+if [ $# -ne 3 ] ; then
 	[ $# -eq 1 ] && hostarch=$1 || hostarch=`uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/`
+	linux_header_dir=tools/include/uapi/linux
 	header_dir=tools/include/uapi/asm-generic
 	arch_header_dir=tools/arch/${hostarch}/include/uapi/asm
 else
-	header_dir=$1
-	arch_header_dir=$2
+	linux_header_dir=$1
+	header_dir=$2
+	arch_header_dir=$3
 fi
 
+linux_mman=${linux_header_dir}/mman.h
 arch_mman=${arch_header_dir}/mman.h
 
 # those in egrep -vw are flags, we want just the bits
@@ -20,6 +23,11 @@ egrep -q $regex ${arch_mman} && \
 (egrep $regex ${arch_mman} | \
 	sed -r "s/$regex/\2 \1/g" | \
 	xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
+egrep -q $regex ${linux_mman} && \
+(egrep $regex ${linux_mman} | \
+	egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
+	sed -r "s/$regex/\2 \1/g" | \
+	xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
 ([ ! -f ${arch_mman} ] || egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman}) &&
 (egrep $regex ${header_dir}/mman-common.h | \
 	egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
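For context, and not the script's verbatim output: each egrep/sed/printf pipeline above emits one C designated initializer per flag, keyed by the flag's bit position. The generated mmap_flags_array.c therefore looks roughly like the sketch below; the flag values are taken from the asm-generic uapi header, while the surrounding array declaration is an illustrative assumption:

	/* Hypothetical excerpt of the generated mmap_flags_array.c. */
	static const char *mmap_flags[] = {
		[ilog2(0x10) + 1]   = "FIXED",     /* MAP_FIXED */
		[ilog2(0x0100) + 1] = "GROWSDOWN", /* MAP_GROWSDOWN */
		[ilog2(0x2000) + 1] = "LOCKED",    /* MAP_LOCKED */
	};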
...
@@ -387,6 +387,7 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
 		break;
 	case OCSD_INSTR_ISB:
 	case OCSD_INSTR_DSB_DMB:
+	case OCSD_INSTR_WFI_WFE:
 	case OCSD_INSTR_OTHER:
 	default:
 		packet->last_instr_taken_branch = false;
...
@@ -231,35 +231,6 @@ void perf_evlist__set_leader(struct perf_evlist *evlist)
 	}
 }
 
-void perf_event_attr__set_max_precise_ip(struct perf_event_attr *pattr)
-{
-	struct perf_event_attr attr = {
-		.type		= PERF_TYPE_HARDWARE,
-		.config		= PERF_COUNT_HW_CPU_CYCLES,
-		.exclude_kernel	= 1,
-		.precise_ip	= 3,
-	};
-
-	event_attr_init(&attr);
-
-	/*
-	 * Unnamed union member, not supported as struct member named
-	 * initializer in older compilers such as gcc 4.4.7
-	 */
-	attr.sample_period = 1;
-
-	while (attr.precise_ip != 0) {
-		int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
-		if (fd != -1) {
-			close(fd);
-			break;
-		}
-
-		--attr.precise_ip;
-	}
-
-	pattr->precise_ip = attr.precise_ip;
-}
-
 int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise)
 {
 	struct perf_evsel *evsel = perf_evsel__new_cycles(precise);
...
@@ -315,8 +315,6 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
 void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
 				     struct perf_evsel *tracking_evsel);
 
-void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);
-
 struct perf_evsel *
 perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
...
@@ -295,7 +295,6 @@ struct perf_evsel *perf_evsel__new_cycles(bool precise)
 	if (!precise)
 		goto new_event;
 
-	perf_event_attr__set_max_precise_ip(&attr);
 	/*
 	 * Now let the usual logic to set up the perf_event_attr defaults
 	 * to kick in when we return and before perf_evsel__open() is called.
@@ -305,6 +304,8 @@ struct perf_evsel *perf_evsel__new_cycles(bool precise)
 	if (evsel == NULL)
 		goto out;
 
+	evsel->precise_max = true;
+
 	/* use asprintf() because free(evsel) assumes name is allocated */
 	if (asprintf(&evsel->name, "cycles%s%s%.*s",
 		     (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
@@ -1083,7 +1084,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
 	}
 
 	if (evsel->precise_max)
-		perf_event_attr__set_max_precise_ip(attr);
+		attr->precise_ip = 3;
 
 	if (opts->all_user) {
 		attr->exclude_kernel = 1;
@@ -1749,6 +1750,59 @@ static bool ignore_missing_thread(struct perf_evsel *evsel,
 	return true;
 }
 
+static void display_attr(struct perf_event_attr *attr)
+{
+	if (verbose >= 2) {
+		fprintf(stderr, "%.60s\n", graph_dotted_line);
+		fprintf(stderr, "perf_event_attr:\n");
+		perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
+		fprintf(stderr, "%.60s\n", graph_dotted_line);
+	}
+}
+
+static int perf_event_open(struct perf_evsel *evsel,
+			   pid_t pid, int cpu, int group_fd,
+			   unsigned long flags)
+{
+	int precise_ip = evsel->attr.precise_ip;
+	int fd;
+
+	while (1) {
+		pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
+			  pid, cpu, group_fd, flags);
+
+		fd = sys_perf_event_open(&evsel->attr, pid, cpu, group_fd, flags);
+		if (fd >= 0)
+			break;
+
+		/*
+		 * Do quick precise_ip fallback if:
+		 *  - there is precise_ip set in perf_event_attr
+		 *  - maximum precise is requested
+		 *  - sys_perf_event_open failed with ENOTSUP error,
+		 *    which is associated with wrong precise_ip
+		 */
+		if (!precise_ip || !evsel->precise_max || (errno != ENOTSUP))
+			break;
+
+		/*
+		 * We tried all the precise_ip values, and it's
+		 * still failing, so leave it to standard fallback.
+		 */
+		if (!evsel->attr.precise_ip) {
+			evsel->attr.precise_ip = precise_ip;
+			break;
+		}
+
+		pr_debug2("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
+		evsel->attr.precise_ip--;
+		pr_debug2("decreasing precise_ip by one (%d)\n", evsel->attr.precise_ip);
+		display_attr(&evsel->attr);
+	}
+
+	return fd;
+}
+
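For illustration only, not code from this commit: the retry loop above, reduced to its core. try_open() is a hypothetical stand-in for sys_perf_event_open(), and the struct is trimmed to the one field the fallback cares about:

	#include <errno.h>

	/* Hypothetical reduction of the precise_ip fallback loop. */
	struct fake_attr { int precise_ip; };

	extern int try_open(struct fake_attr *attr); /* stand-in; assumed to set errno */

	static int open_with_fallback(struct fake_attr *attr)
	{
		int fd;

		while ((fd = try_open(attr)) < 0) {
			/* only retry when the failure looks like a precision problem */
			if (errno != ENOTSUP || attr->precise_ip == 0)
				break;
			attr->precise_ip--; /* ask for one level less precision */
		}
		return fd;
	}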
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
 		     struct thread_map *threads)
 {
@@ -1824,12 +1878,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
 	if (perf_missing_features.sample_id_all)
 		evsel->attr.sample_id_all = 0;
 
-	if (verbose >= 2) {
-		fprintf(stderr, "%.60s\n", graph_dotted_line);
-		fprintf(stderr, "perf_event_attr:\n");
-		perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
-		fprintf(stderr, "%.60s\n", graph_dotted_line);
-	}
+	display_attr(&evsel->attr);
 
 	for (cpu = 0; cpu < cpus->nr; cpu++) {
@@ -1841,12 +1890,9 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
 			group_fd = get_group_fd(evsel, cpu, thread);
 retry_open:
-			pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
-				  pid, cpus->map[cpu], group_fd, flags);
-
 			test_attr__ready();
 
-			fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu],
-						 group_fd, flags);
+			fd = perf_event_open(evsel, pid, cpus->map[cpu],
+					     group_fd, flags);
 
 			FD(evsel, cpu, thread) = fd;
...
@@ -251,19 +251,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
 		if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
 			decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
 						decoder->tsc_ctc_ratio_d;
-
-		/*
-		 * Allow for timestamps appearing to backwards because a TSC
-		 * packet has slipped past a MTC packet, so allow 2 MTC ticks
-		 * or ...
-		 */
-		decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
-					decoder->tsc_ctc_ratio_n,
-					decoder->tsc_ctc_ratio_d);
 	}
-	/* ... or 0x100 paranoia */
-	if (decoder->tsc_slip < 0x100)
-		decoder->tsc_slip = 0x100;
+
+	/*
+	 * A TSC packet can slip past MTC packets so that the timestamp appears
+	 * to go backwards. One estimate is that can be up to about 40 CPU
+	 * cycles, which is certainly less than 0x1000 TSC ticks, but accept
+	 * slippage an order of magnitude more to be on the safe side.
+	 */
+	decoder->tsc_slip = 0x10000;
 
 	intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
 	intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
...
@@ -1421,6 +1421,20 @@ static void machine__set_kernel_mmap(struct machine *machine,
 		machine->vmlinux_map->end = ~0ULL;
 }
 
+static void machine__update_kernel_mmap(struct machine *machine,
+				     u64 start, u64 end)
+{
+	struct map *map = machine__kernel_map(machine);
+
+	map__get(map);
+	map_groups__remove(&machine->kmaps, map);
+
+	machine__set_kernel_mmap(machine, start, end);
+
+	map_groups__insert(&machine->kmaps, map);
+	map__put(map);
+}
+
 int machine__create_kernel_maps(struct machine *machine)
 {
 	struct dso *kernel = machine__get_kernel(machine);
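An aside on the pattern the new helper wraps (illustrative; the container API below is a hypothetical stand-in for map_groups, not perf code): kmaps keeps maps ordered by address, so changing the kernel map's range has to be remove, mutate, re-insert rather than an in-place update, or lookups against the ordered structure would break:

	struct node { unsigned long start; };	/* start is the sort key */

	/* Hypothetical sorted-container operations standing in for
	 * map_groups__remove()/map_groups__insert(). */
	extern void tree_remove(struct node *n);
	extern void tree_insert(struct node *n);

	static void tree_update_key(struct node *n, unsigned long new_start)
	{
		tree_remove(n);		/* detach before the key changes */
		n->start = new_start;	/* safe to mutate while detached */
		tree_insert(n);		/* re-insert to restore ordering */
	}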
@@ -1453,17 +1467,11 @@ int machine__create_kernel_maps(struct machine *machine)
 			goto out_put;
 		}
 
-		/* we have a real start address now, so re-order the kmaps */
-		map = machine__kernel_map(machine);
-
-		map__get(map);
-		map_groups__remove(&machine->kmaps, map);
-
-		/* assume it's the last in the kmaps */
-		machine__set_kernel_mmap(machine, addr, ~0ULL);
-
-		map_groups__insert(&machine->kmaps, map);
-		map__put(map);
+		/*
+		 * we have a real start address now, so re-order the kmaps
+		 * assume it's the last in the kmaps
+		 */
+		machine__update_kernel_mmap(machine, addr, ~0ULL);
 	}
 
 	if (machine__create_extra_kernel_maps(machine, kernel))
@@ -1599,7 +1607,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
 		if (strstr(kernel->long_name, "vmlinux"))
 			dso__set_short_name(kernel, "[kernel.vmlinux]", false);
 
-		machine__set_kernel_mmap(machine, event->mmap.start,
-					 event->mmap.start + event->mmap.len);
+		machine__update_kernel_mmap(machine, event->mmap.start,
+					    event->mmap.start + event->mmap.len);
 
 		/*
...
@@ -732,10 +732,20 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
 		if (!is_arm_pmu_core(name)) {
 			pname = pe->pmu ? pe->pmu : "cpu";
+
+			/*
+			 * uncore alias may be from different PMU
+			 * with common prefix
+			 */
+			if (pmu_is_uncore(name) &&
+			    !strncmp(pname, name, strlen(pname)))
+				goto new_alias;
+
 			if (strcmp(pname, name))
 				continue;
 		}
 
+new_alias:
 		/* need type casts to override 'const' */
 		__perf_pmu__new_alias(head, NULL, (char *)pe->name,
 				      (char *)pe->desc, (char *)pe->event,
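To make the prefix rule concrete, an illustrative sketch (the PMU names are hypothetical examples, and this is not code from the commit): an alias whose pmu field names the common prefix of an uncore PMU family should match every instance of that family, which strncmp() on the prefix accepts while plain strcmp() would reject:

	#include <stdio.h>
	#include <string.h>

	/* Illustrative only: prefix matching of uncore PMU instances. */
	int main(void)
	{
		const char *pname = "uncore_cbox"; /* PMU named by the event alias */
		const char *names[] = { "uncore_cbox_0", "uncore_cbox_1", "cpu" };

		for (int i = 0; i < 3; i++)
			printf("%-14s %s\n", names[i],
			       strncmp(pname, names[i], strlen(pname)) ? "no match" : "match");
		return 0;
	}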
...