Commit d7a5aa4b authored by Linus Torvalds

Merge tag 'perf-tools-fixes-for-v6.11-2024-08-15' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools

Pull perf tools fixes from Namhyung Kim:
 "The usual header file sync-ups and one more build fix:

   - Add README file to explain why we copy the headers

   - Sync UAPI and other header files with kernel source

   - Fix build on MIPS 32-bit"

* tag 'perf-tools-fixes-for-v6.11-2024-08-15' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools:
  perf daemon: Fix the build on 32-bit architectures
  tools/include: Sync arm64 headers with the kernel sources
  tools/include: Sync x86 headers with the kernel sources
  tools/include: Sync filesystem headers with the kernel sources
  tools/include: Sync network socket headers with the kernel sources
  tools/include: Sync uapi/asm-generic/unistd.h with the kernel sources
  tools/include: Sync uapi/sound/asound.h with the kernel sources
  tools/include: Sync uapi/linux/perf.h with the kernel sources
  tools/include: Sync uapi/linux/kvm.h with the kernel sources
  tools/include: Sync uapi/drm/i915_drm.h with the kernel sources
  perf tools: Add tools/include/uapi/README
parents e724918b 4bbe6002
......@@ -86,9 +86,14 @@
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
#define ARM_CPU_PART_CORTEX_X1C 0xD4C
#define ARM_CPU_PART_CORTEX_X3 0xD4E
#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
#define ARM_CPU_PART_CORTEX_A720 0xD81
#define ARM_CPU_PART_CORTEX_X4 0xD82
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define ARM_CPU_PART_CORTEX_X925 0xD85
#define ARM_CPU_PART_CORTEX_A725 0xD87
#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00
......@@ -162,9 +167,14 @@
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
#define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3)
#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
......
......@@ -645,6 +645,9 @@ struct kvm_ppc_cpu_char {
#define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3)
#define KVM_REG_PPC_DAWR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4)
#define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5)
#define KVM_REG_PPC_DEXCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc6)
#define KVM_REG_PPC_HASHKEYR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc7)
#define KVM_REG_PPC_HASHPKEYR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc8)
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
......
......@@ -566,6 +566,12 @@
#define MSR_RELOAD_PMC0 0x000014c1
#define MSR_RELOAD_FIXED_CTR0 0x00001309
/* V6 PMON MSR range */
#define MSR_IA32_PMC_V6_GP0_CTR 0x1900
#define MSR_IA32_PMC_V6_GP0_CFG_A 0x1901
#define MSR_IA32_PMC_V6_FX0_CTR 0x1980
#define MSR_IA32_PMC_V6_STEP 4
/* KeyID partitioning between MKTME and TDX */
#define MSR_IA32_MKTME_KEYID_PARTITIONING 0x00000087
......@@ -660,6 +666,8 @@
#define MSR_AMD64_RMP_BASE 0xc0010132
#define MSR_AMD64_RMP_END 0xc0010133
#define MSR_SVSM_CAA 0xc001f000
/* AMD Collaborative Processor Performance Control MSRs */
#define MSR_AMD_CPPC_CAP1 0xc00102b0
#define MSR_AMD_CPPC_ENABLE 0xc00102b1
......@@ -781,6 +789,8 @@
#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
#define MSR_K7_FID_VID_CTL 0xc0010041
#define MSR_K7_FID_VID_STATUS 0xc0010042
#define MSR_K7_HWCR_CPB_DIS_BIT 25
#define MSR_K7_HWCR_CPB_DIS BIT_ULL(MSR_K7_HWCR_CPB_DIS_BIT)
/* K6 MSRs */
#define MSR_K6_WHCR 0xc0000082
......@@ -1164,6 +1174,7 @@
#define MSR_IA32_QM_CTR 0xc8e
#define MSR_IA32_PQR_ASSOC 0xc8f
#define MSR_IA32_L3_CBM_BASE 0xc90
#define MSR_RMID_SNC_CONFIG 0xca0
#define MSR_IA32_L2_CBM_BASE 0xd10
#define MSR_IA32_MBA_THRTL_BASE 0xd50
......
......@@ -106,6 +106,7 @@ struct kvm_ioapic_state {
#define KVM_RUN_X86_SMM (1 << 0)
#define KVM_RUN_X86_BUS_LOCK (1 << 1)
#define KVM_RUN_X86_GUEST_MODE (1 << 2)
/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
......@@ -697,6 +698,11 @@ enum sev_cmd_id {
/* Second time is the charm; improved versions of the above ioctls. */
KVM_SEV_INIT2,
/* SNP-specific commands */
KVM_SEV_SNP_LAUNCH_START = 100,
KVM_SEV_SNP_LAUNCH_UPDATE,
KVM_SEV_SNP_LAUNCH_FINISH,
KVM_SEV_NR_MAX,
};
......@@ -824,6 +830,48 @@ struct kvm_sev_receive_update_data {
__u32 pad2;
};
struct kvm_sev_snp_launch_start {
__u64 policy;
__u8 gosvw[16];
__u16 flags;
__u8 pad0[6];
__u64 pad1[4];
};
/* Kept in sync with firmware values for simplicity. */
#define KVM_SEV_SNP_PAGE_TYPE_NORMAL 0x1
#define KVM_SEV_SNP_PAGE_TYPE_ZERO 0x3
#define KVM_SEV_SNP_PAGE_TYPE_UNMEASURED 0x4
#define KVM_SEV_SNP_PAGE_TYPE_SECRETS 0x5
#define KVM_SEV_SNP_PAGE_TYPE_CPUID 0x6
struct kvm_sev_snp_launch_update {
__u64 gfn_start;
__u64 uaddr;
__u64 len;
__u8 type;
__u8 pad0;
__u16 flags;
__u32 pad1;
__u64 pad2[4];
};
#define KVM_SEV_SNP_ID_BLOCK_SIZE 96
#define KVM_SEV_SNP_ID_AUTH_SIZE 4096
#define KVM_SEV_SNP_FINISH_DATA_SIZE 32
struct kvm_sev_snp_launch_finish {
__u64 id_block_uaddr;
__u64 id_auth_uaddr;
__u8 id_block_en;
__u8 auth_key_en;
__u8 vcek_disabled;
__u8 host_data[KVM_SEV_SNP_FINISH_DATA_SIZE];
__u8 pad0[3];
__u16 flags;
__u64 pad1[4];
};
#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
......@@ -874,5 +922,6 @@ struct kvm_hyperv_eventfd {
#define KVM_X86_SW_PROTECTED_VM 1
#define KVM_X86_SEV_VM 2
#define KVM_X86_SEV_ES_VM 3
#define KVM_X86_SNP_VM 4
#endif /* _ASM_X86_KVM_H */
......@@ -115,6 +115,7 @@
#define SVM_VMGEXIT_AP_CREATE_ON_INIT 0
#define SVM_VMGEXIT_AP_CREATE 1
#define SVM_VMGEXIT_AP_DESTROY 2
#define SVM_VMGEXIT_SNP_RUN_VMPL 0x80000018
#define SVM_VMGEXIT_HV_FEATURES 0x8000fffd
#define SVM_VMGEXIT_TERM_REQUEST 0x8000fffe
#define SVM_VMGEXIT_TERM_REASON(reason_set, reason_code) \
......
Why do we want a copy of kernel headers in tools?
=================================================
There used to be no copies, with tools/ code using kernel headers
directly. From time to time tools/perf/ broke due to legitimate kernel
hacking. At some point Linus complained about such direct usage. Then we
adopted the current model.
The way these headers are used in perf is not restricted to just
including them to compile something.
They are sometimes used in scripts that convert defines into string
tables, etc., so a change may break one of these scripts, or new MSRs
may use a different #define pattern, etc.
E.g.:
$ ls -1 tools/perf/trace/beauty/*.sh | head -5
tools/perf/trace/beauty/arch_errno_names.sh
tools/perf/trace/beauty/drm_ioctl.sh
tools/perf/trace/beauty/fadvise.sh
tools/perf/trace/beauty/fsconfig.sh
tools/perf/trace/beauty/fsmount.sh
$
$ tools/perf/trace/beauty/fadvise.sh
static const char *fadvise_advices[] = {
[0] = "NORMAL",
[1] = "RANDOM",
[2] = "SEQUENTIAL",
[3] = "WILLNEED",
[4] = "DONTNEED",
[5] = "NOREUSE",
};
$
The tools/perf/check-headers.sh script, part of the tools/ build
process, points out changes in the original files.
So it's important not to touch the copies in tools/ when changing the
original kernel headers; the copies will be updated later, when
check-headers.sh informs the perf tools hackers about the change.
Another explanation from Ingo Molnar:
It's better than all the alternatives we tried so far:
- Symbolic links and direct #includes: this was the original approach but
was pushed back on from the kernel side, when tooling modified the
headers and broke them accidentally for kernel builds.
- Duplicate self-defined ABI headers like glibc: double the maintenance
burden, double the chance for mistakes, plus there's no tech-driven
notification mechanism to look at new kernel side changes.
What we are doing now is a third option:
- A software-enforced copy-on-write mechanism of kernel headers to
tooling, driven by non-fatal warnings on the tooling side build when
kernel headers get modified:
Warning: Kernel ABI header differences:
diff -u tools/include/uapi/drm/i915_drm.h include/uapi/drm/i915_drm.h
diff -u tools/include/uapi/linux/fs.h include/uapi/linux/fs.h
diff -u tools/include/uapi/linux/kvm.h include/uapi/linux/kvm.h
...
The tooling policy is to always pick up the kernel side headers as-is,
and integrate them into the tooling build. The warnings above serve as a
notification to tooling maintainers that there are changes on the kernel
side.
We've been using this for many years now, and it might seem hacky, but
it works surprisingly well.
......@@ -737,7 +737,7 @@ __SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
#define __NR_ppoll_time64 414
__SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
#define __NR_io_pgetevents_time64 416
__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
__SC_COMP(__NR_io_pgetevents_time64, sys_io_pgetevents, compat_sys_io_pgetevents_time64)
#define __NR_recvmmsg_time64 417
__SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
#define __NR_mq_timedsend_time64 418
......
......@@ -2163,6 +2163,15 @@ struct drm_i915_gem_context_param {
* supports this per context flag.
*/
#define I915_CONTEXT_PARAM_LOW_LATENCY 0xe
/*
* I915_CONTEXT_PARAM_CONTEXT_IMAGE:
*
* Allows userspace to provide own context images.
*
* Note that this is a debug API not available on production kernel builds.
*/
#define I915_CONTEXT_PARAM_CONTEXT_IMAGE 0xf
/* Must be kept compact -- no holes and well documented */
/** @value: Context parameter value to be set or queried */
......@@ -2564,6 +2573,24 @@ struct i915_context_param_engines {
struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__
struct i915_gem_context_param_context_image {
/** @engine: Engine class & instance to be configured. */
struct i915_engine_class_instance engine;
/** @flags: One of the supported flags or zero. */
__u32 flags;
#define I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX (1u << 0)
/** @size: Size of the image blob pointed to by @image. */
__u32 size;
/** @mbz: Must be zero. */
__u32 mbz;
/** @image: Userspace memory containing the context image. */
__u64 image;
} __attribute__((packed));
/**
* struct drm_i915_gem_context_create_ext_setparam - Context parameter
* to set or query during context creation.
......
......@@ -81,6 +81,8 @@ enum {
#define IPPROTO_ETHERNET IPPROTO_ETHERNET
IPPROTO_RAW = 255, /* Raw IP packets */
#define IPPROTO_RAW IPPROTO_RAW
IPPROTO_SMC = 256, /* Shared Memory Communications */
#define IPPROTO_SMC IPPROTO_SMC
IPPROTO_MPTCP = 262, /* Multipath TCP connection */
#define IPPROTO_MPTCP IPPROTO_MPTCP
IPPROTO_MAX
......
......@@ -192,11 +192,24 @@ struct kvm_xen_exit {
/* Flags that describe what fields in emulation_failure hold valid data. */
#define KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES (1ULL << 0)
/*
* struct kvm_run can be modified by userspace at any time, so KVM must be
* careful to avoid TOCTOU bugs. In order to protect KVM, HINT_UNSAFE_IN_KVM()
* renames fields in struct kvm_run from <symbol> to <symbol>__unsafe when
* compiled into the kernel, ensuring that any use within KVM is obvious and
* gets extra scrutiny.
*/
#ifdef __KERNEL__
#define HINT_UNSAFE_IN_KVM(_symbol) _symbol##__unsafe
#else
#define HINT_UNSAFE_IN_KVM(_symbol) _symbol
#endif
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run {
/* in */
__u8 request_interrupt_window;
__u8 immediate_exit;
__u8 HINT_UNSAFE_IN_KVM(immediate_exit);
__u8 padding1[6];
/* out */
......@@ -918,6 +931,8 @@ struct kvm_enable_cap {
#define KVM_CAP_GUEST_MEMFD 234
#define KVM_CAP_VM_TYPES 235
#define KVM_CAP_PRE_FAULT_MEMORY 236
#define KVM_CAP_X86_APIC_BUS_CYCLES_NS 237
#define KVM_CAP_X86_GUEST_MODE 238
struct kvm_irq_routing_irqchip {
__u32 irqchip;
......
......@@ -1349,12 +1349,14 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
/* 5-0x7 available */
#define PERF_MEM_LVLNUM_L2_MHB 0x05 /* L2 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_MSC 0x06 /* Memory-side Cache */
/* 0x7 available */
#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */
#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */
#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB / L1 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */
#define PERF_MEM_LVLNUM_PMEM 0x0e /* PMEM */
#define PERF_MEM_LVLNUM_NA 0x0f /* N/A */
......
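The new PERF_MEM_LVLNUM_L2_MHB and PERF_MEM_LVLNUM_MSC values land in the 4-bit mem_lvl_num field of union perf_mem_data_src. As a purely illustrative sketch (not perf code, and assuming a perf_event.h copy that already carries these defines), a consumer of PERF_SAMPLE_DATA_SRC samples could translate them like this:

#include <stdio.h>
#include <linux/perf_event.h>	/* union perf_mem_data_src, PERF_MEM_LVLNUM_* */

/* Illustrative mapping for a few levels, including the ones added here. */
static const char *lvlnum_str(unsigned int lvlnum)
{
	switch (lvlnum) {
	case PERF_MEM_LVLNUM_L1:	return "L1";
	case PERF_MEM_LVLNUM_L2:	return "L2";
	case PERF_MEM_LVLNUM_L2_MHB:	return "L2 Miss Handling Buffer";
	case PERF_MEM_LVLNUM_MSC:	return "Memory-side Cache";
	case PERF_MEM_LVLNUM_LFB:	return "LFB / L1 Miss Handling Buffer";
	case PERF_MEM_LVLNUM_RAM:	return "RAM";
	default:			return "N/A";
	}
}

int main(void)
{
	union perf_mem_data_src src;

	src.val = 0;
	src.mem_lvl_num = PERF_MEM_LVLNUM_L2_MHB;	/* as found in a sample's data_src */
	printf("%s\n", lvlnum_str(src.mem_lvl_num));
	return 0;
}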
......@@ -126,9 +126,15 @@ struct statx {
__u64 stx_mnt_id;
__u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
__u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
__u64 stx_subvol; /* Subvolume identifier */
/* 0xa0 */
__u64 __spare3[11]; /* Spare space for future expansion */
__u64 stx_subvol; /* Subvolume identifier */
__u32 stx_atomic_write_unit_min; /* Min atomic write unit in bytes */
__u32 stx_atomic_write_unit_max; /* Max atomic write unit in bytes */
/* 0xb0 */
__u32 stx_atomic_write_segments_max; /* Max atomic write segment count */
__u32 __spare1[1];
/* 0xb8 */
__u64 __spare3[9]; /* Spare space for future expansion */
/* 0x100 */
};
......@@ -157,6 +163,7 @@ struct statx {
#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
#define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */
#define STATX_WRITE_ATOMIC 0x00010000U /* Want/got atomic_write_* fields */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
......@@ -192,6 +199,7 @@ struct statx {
#define STATX_ATTR_MOUNT_ROOT 0x00002000 /* Root of a mount */
#define STATX_ATTR_VERITY 0x00100000 /* [I] Verity protected file */
#define STATX_ATTR_DAX 0x00200000 /* File is currently in DAX state */
#define STATX_ATTR_WRITE_ATOMIC 0x00400000 /* File supports atomic write operations */
#endif /* _UAPI_LINUX_STAT_H */
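The STATX_WRITE_ATOMIC additions above are queryable from userspace with statx(2). A minimal sketch, assuming a libc that provides the statx() wrapper and a struct statx that already carries the stx_atomic_write_* fields (otherwise the values sit at the offsets annotated above):

#define _GNU_SOURCE
#include <fcntl.h>	/* AT_FDCWD */
#include <stdio.h>
#include <sys/stat.h>	/* statx(), struct statx, STATX_* */

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	struct statx stx;

	if (statx(AT_FDCWD, path, 0, STATX_BASIC_STATS | STATX_WRITE_ATOMIC, &stx)) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_WRITE_ATOMIC)
		printf("atomic writes: unit min %u, unit max %u, max segments %u\n",
		       stx.stx_atomic_write_unit_min,
		       stx.stx_atomic_write_unit_max,
		       stx.stx_atomic_write_segments_max);
	else
		puts("no atomic write info reported for this file/kernel");
	return 0;
}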
......@@ -230,8 +230,10 @@
178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
179 32 pread64 sys_ppc_pread64 compat_sys_ppc_pread64
179 64 pread64 sys_pread64
179 spu pread64 sys_pread64
180 32 pwrite64 sys_ppc_pwrite64 compat_sys_ppc_pwrite64
180 64 pwrite64 sys_pwrite64
180 spu pwrite64 sys_pwrite64
181 common chown sys_chown
182 common getcwd sys_getcwd
183 common capget sys_capget
......@@ -246,6 +248,7 @@
190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
191 32 readahead sys_ppc_readahead compat_sys_ppc_readahead
191 64 readahead sys_readahead
191 spu readahead sys_readahead
192 32 mmap2 sys_mmap2 compat_sys_mmap2
193 32 truncate64 sys_ppc_truncate64 compat_sys_ppc_truncate64
194 32 ftruncate64 sys_ppc_ftruncate64 compat_sys_ppc_ftruncate64
......@@ -293,6 +296,7 @@
232 nospu set_tid_address sys_set_tid_address
233 32 fadvise64 sys_ppc32_fadvise64 compat_sys_ppc32_fadvise64
233 64 fadvise64 sys_fadvise64
233 spu fadvise64 sys_fadvise64
234 nospu exit_group sys_exit_group
235 nospu lookup_dcookie sys_ni_syscall
236 common epoll_create sys_epoll_create
......@@ -502,7 +506,7 @@
412 32 utimensat_time64 sys_utimensat sys_utimensat
413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
416 32 io_pgetevents_time64 sys_io_pgetevents compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
......
......@@ -418,7 +418,7 @@
412 32 utimensat_time64 - sys_utimensat
413 32 pselect6_time64 - compat_sys_pselect6_time64
414 32 ppoll_time64 - compat_sys_ppoll_time64
416 32 io_pgetevents_time64 - sys_io_pgetevents
416 32 io_pgetevents_time64 - compat_sys_io_pgetevents_time64
417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64
418 32 mq_timedsend_time64 - sys_mq_timedsend
419 32 mq_timedreceive_time64 - sys_mq_timedreceive
......
# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
#
# 64-bit system call numbers and entry vectors
#
# The format is:
# <number> <abi> <name> <entry point>
# <number> <abi> <name> <entry point> [<compat entry point> [noreturn]]
#
# The __x64_sys_*() stubs are created on-the-fly for sys_*() system calls
#
......@@ -68,7 +69,7 @@
57 common fork sys_fork
58 common vfork sys_vfork
59 64 execve sys_execve
60 common exit sys_exit
60 common exit sys_exit - noreturn
61 common wait4 sys_wait4
62 common kill sys_kill
63 common uname sys_newuname
......@@ -239,7 +240,7 @@
228 common clock_gettime sys_clock_gettime
229 common clock_getres sys_clock_getres
230 common clock_nanosleep sys_clock_nanosleep
231 common exit_group sys_exit_group
231 common exit_group sys_exit_group - noreturn
232 common epoll_wait sys_epoll_wait
233 common epoll_ctl sys_epoll_ctl
234 common tgkill sys_tgkill
......@@ -343,6 +344,7 @@
332 common statx sys_statx
333 common io_pgetevents sys_io_pgetevents
334 common rseq sys_rseq
335 common uretprobe sys_uretprobe
# don't use numbers 387 through 423, add new calls after the last
# 'common' entry
424 common pidfd_send_signal sys_pidfd_send_signal
......
// SPDX-License-Identifier: GPL-2.0
#include <internal/lib.h>
#include <inttypes.h>
#include <subcmd/parse-options.h>
#include <api/fd/array.h>
#include <api/fs/fs.h>
......@@ -688,7 +689,7 @@ static int cmd_session_list(struct daemon *daemon, union cmd *cmd, FILE *out)
/* lock */
csv_sep, daemon->base, "lock");
fprintf(out, "%c%lu",
fprintf(out, "%c%" PRIu64,
/* session up time */
csv_sep, (curr - daemon->start) / 60);
......@@ -700,7 +701,7 @@ static int cmd_session_list(struct daemon *daemon, union cmd *cmd, FILE *out)
daemon->base, SESSION_OUTPUT);
fprintf(out, " lock: %s/lock\n",
daemon->base);
fprintf(out, " up: %lu minutes\n",
fprintf(out, " up: %" PRIu64 " minutes\n",
(curr - daemon->start) / 60);
}
}
......@@ -727,7 +728,7 @@ static int cmd_session_list(struct daemon *daemon, union cmd *cmd, FILE *out)
/* session ack */
csv_sep, session->base, SESSION_ACK);
fprintf(out, "%c%lu",
fprintf(out, "%c%" PRIu64,
/* session up time */
csv_sep, (curr - session->start) / 60);
......@@ -745,7 +746,7 @@ static int cmd_session_list(struct daemon *daemon, union cmd *cmd, FILE *out)
session->base, SESSION_CONTROL);
fprintf(out, " ack: %s/%s\n",
session->base, SESSION_ACK);
fprintf(out, " up: %lu minutes\n",
fprintf(out, " up: %" PRIu64 " minutes\n",
(curr - session->start) / 60);
}
}
......
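The perf daemon hunks above are the 32-bit build fix called out in the pull request: the up-time values are 64-bit, while "%lu" only matches a 64-bit integer where unsigned long is 64 bits wide, so the format mismatch broke the MIPS 32-bit build (perf builds with -Werror by default). A standalone illustration of the portable PRIu64 pattern (not perf code itself):

#include <inttypes.h>	/* PRIu64 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t up_minutes = 42;	/* stands in for (curr - start) / 60 */

	/* "%lu" expects unsigned long (32-bit on 32-bit MIPS), so passing a
	 * 64-bit value is a format mismatch there; PRIu64 expands to the
	 * right conversion specifier on both 32-bit and 64-bit targets. */
	printf("up: %" PRIu64 " minutes\n", up_minutes);
	return 0;
}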
......@@ -76,7 +76,7 @@ struct msghdr {
__kernel_size_t msg_controllen; /* ancillary data buffer length */
struct kiocb *msg_iocb; /* ptr to iocb for async requests */
struct ubuf_info *msg_ubuf;
int (*sg_from_iter)(struct sock *sk, struct sk_buff *skb,
int (*sg_from_iter)(struct sk_buff *skb,
struct iov_iter *from, size_t length);
};
......@@ -442,11 +442,14 @@ extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
extern int __sys_socket(int family, int type, int protocol);
extern struct file *__sys_socket_file(int family, int type, int protocol);
extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
extern int __sys_bind_socket(struct socket *sock, struct sockaddr_storage *address,
int addrlen);
extern int __sys_connect_file(struct file *file, struct sockaddr_storage *addr,
int addrlen, int file_flags);
extern int __sys_connect(int fd, struct sockaddr __user *uservaddr,
int addrlen);
extern int __sys_listen(int fd, int backlog);
extern int __sys_listen_socket(struct socket *sock, int backlog);
extern int __sys_getsockname(int fd, struct sockaddr __user *usockaddr,
int __user *usockaddr_len);
extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr,
......
......@@ -329,12 +329,17 @@ typedef int __bitwise __kernel_rwf_t;
/* per-IO negation of O_APPEND */
#define RWF_NOAPPEND ((__force __kernel_rwf_t)0x00000020)
/* Atomic Write */
#define RWF_ATOMIC ((__force __kernel_rwf_t)0x00000040)
/* mask of flags supported by the kernel */
#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
RWF_APPEND | RWF_NOAPPEND)
RWF_APPEND | RWF_NOAPPEND | RWF_ATOMIC)
#define PROCFS_IOCTL_MAGIC 'f'
/* Pagemap ioctl */
#define PAGEMAP_SCAN _IOWR('f', 16, struct pm_scan_arg)
#define PAGEMAP_SCAN _IOWR(PROCFS_IOCTL_MAGIC, 16, struct pm_scan_arg)
/* Bitmasks provided in pm_scan_args masks and reported in page_region.categories. */
#define PAGE_IS_WPALLOWED (1 << 0)
......@@ -393,4 +398,158 @@ struct pm_scan_arg {
__u64 return_mask;
};
/* /proc/<pid>/maps ioctl */
#define PROCMAP_QUERY _IOWR(PROCFS_IOCTL_MAGIC, 17, struct procmap_query)
enum procmap_query_flags {
/*
* VMA permission flags.
*
* Can be used as part of procmap_query.query_flags field to look up
* only VMAs satisfying specified subset of permissions. E.g., specifying
* PROCMAP_QUERY_VMA_READABLE only will return both readable and read/write VMAs,
* while having PROCMAP_QUERY_VMA_READABLE | PROCMAP_QUERY_VMA_WRITABLE will only
* return read/write VMAs, though both executable/non-executable and
* private/shared will be ignored.
*
* PROCMAP_QUERY_VMA_* flags are also returned in procmap_query.vma_flags
* field to specify actual VMA permissions.
*/
PROCMAP_QUERY_VMA_READABLE = 0x01,
PROCMAP_QUERY_VMA_WRITABLE = 0x02,
PROCMAP_QUERY_VMA_EXECUTABLE = 0x04,
PROCMAP_QUERY_VMA_SHARED = 0x08,
/*
* Query modifier flags.
*
* By default VMA that covers provided address is returned, or -ENOENT
* is returned. With PROCMAP_QUERY_COVERING_OR_NEXT_VMA flag set, closest
* VMA with vma_start > addr will be returned if no covering VMA is
* found.
*
* PROCMAP_QUERY_FILE_BACKED_VMA instructs query to consider only VMAs that
* have file backing. Can be combined with PROCMAP_QUERY_COVERING_OR_NEXT_VMA
* to iterate all VMAs with file backing.
*/
PROCMAP_QUERY_COVERING_OR_NEXT_VMA = 0x10,
PROCMAP_QUERY_FILE_BACKED_VMA = 0x20,
};
/*
* Input/output argument structured passed into ioctl() call. It can be used
* to query a set of VMAs (Virtual Memory Areas) of a process.
*
* Each field can be one of three kinds, marked in a short comment to the
* right of the field:
* - "in", input argument, user has to provide this value, kernel doesn't modify it;
* - "out", output argument, kernel sets this field with VMA data;
* - "in/out", input and output argument; user provides initial value (used
* to specify maximum allowable buffer size), and kernel sets it to actual
* amount of data written (or zero, if there is no data).
*
* If matching VMA is found (according to criterias specified by
* query_addr/query_flags, all the out fields are filled out, and ioctl()
* returns 0. If there is no matching VMA, -ENOENT will be returned.
* In case of any other error, negative error code other than -ENOENT is
* returned.
*
* Most of the data is similar to the one returned as text in /proc/<pid>/maps
* file, but procmap_query provides more querying flexibility. There are no
* consistency guarantees between subsequent ioctl() calls, but data returned
* for matched VMA is self-consistent.
*/
struct procmap_query {
/* Query struct size, for backwards/forward compatibility */
__u64 size;
/*
* Query flags, a combination of enum procmap_query_flags values.
* Defines query filtering and behavior, see enum procmap_query_flags.
*
* Input argument, provided by user. Kernel doesn't modify it.
*/
__u64 query_flags; /* in */
/*
* Query address. By default, VMA that covers this address will
* be looked up. PROCMAP_QUERY_* flags above modify this default
* behavior further.
*
* Input argument, provided by user. Kernel doesn't modify it.
*/
__u64 query_addr; /* in */
/* VMA starting (inclusive) and ending (exclusive) address, if VMA is found. */
__u64 vma_start; /* out */
__u64 vma_end; /* out */
/* VMA permissions flags. A combination of PROCMAP_QUERY_VMA_* flags. */
__u64 vma_flags; /* out */
/* VMA backing page size granularity. */
__u64 vma_page_size; /* out */
/*
* VMA file offset. If VMA has file backing, this specifies offset
* within the file that VMA's start address corresponds to.
* Is set to zero if VMA has no backing file.
*/
__u64 vma_offset; /* out */
/* Backing file's inode number, or zero, if VMA has no backing file. */
__u64 inode; /* out */
/* Backing file's device major/minor number, or zero, if VMA has no backing file. */
__u32 dev_major; /* out */
__u32 dev_minor; /* out */
/*
* If set to non-zero value, signals the request to return VMA name
* (i.e., VMA's backing file's absolute path, with " (deleted)" suffix
* appended, if file was unlinked from FS) for matched VMA. VMA name
* can also be some special name (e.g., "[heap]", "[stack]") or could
* be even user-supplied with prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME).
*
* Kernel will set this field to zero, if VMA has no associated name.
* Otherwise kernel will return actual amount of bytes filled in
* user-supplied buffer (see vma_name_addr field below), including the
* terminating zero.
*
* If VMA name is longer that user-supplied maximum buffer size,
* -E2BIG error is returned.
*
* If this field is set to non-zero value, vma_name_addr should point
* to valid user space memory buffer of at least vma_name_size bytes.
* If set to zero, vma_name_addr should be set to zero as well
*/
__u32 vma_name_size; /* in/out */
/*
* If set to non-zero value, signals the request to extract and return
* VMA's backing file's build ID, if the backing file is an ELF file
* and it contains embedded build ID.
*
* Kernel will set this field to zero, if VMA has no backing file,
* backing file is not an ELF file, or ELF file has no build ID
* embedded.
*
* Build ID is a binary value (not a string). Kernel will set
* build_id_size field to exact number of bytes used for build ID.
* If build ID is requested and present, but needs more bytes than
* user-supplied maximum buffer size (see build_id_addr field below),
* -E2BIG error will be returned.
*
* If this field is set to non-zero value, build_id_addr should point
* to valid user space memory buffer of at least build_id_size bytes.
* If set to zero, build_id_addr should be set to zero as well
*/
__u32 build_id_size; /* in/out */
/*
* User-supplied address of a buffer of at least vma_name_size bytes
* for kernel to fill with matched VMA's name (see vma_name_size field
* description above for details).
*
* Should be set to zero if VMA name should not be returned.
*/
__u64 vma_name_addr; /* in */
/*
* User-supplied address of a buffer of at least build_id_size bytes
* for kernel to fill with matched VMA's ELF build ID, if available
* (see build_id_size field description above for details).
*
* Should be set to zero if build ID should not be returned.
*/
__u64 build_id_addr; /* in */
};
#endif /* _UAPI_LINUX_FS_H */
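The new PROCMAP_QUERY ioctl above can be exercised directly on /proc/<pid>/maps; a minimal sketch (assuming a kernel and a linux/fs.h copy that already define PROCMAP_QUERY) that looks up the executable VMA covering the program's own code:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>	/* PROCMAP_QUERY, struct procmap_query */

int main(void)
{
	struct procmap_query q;
	int fd = open("/proc/self/maps", O_RDONLY);

	if (fd < 0) {
		perror("open /proc/self/maps");
		return 1;
	}
	memset(&q, 0, sizeof(q));	/* name/build-id buffers not requested */
	q.size = sizeof(q);
	q.query_flags = PROCMAP_QUERY_VMA_READABLE | PROCMAP_QUERY_VMA_EXECUTABLE;
	q.query_addr = (unsigned long)main;	/* address inside our text mapping */

	if (ioctl(fd, PROCMAP_QUERY, &q) == 0)
		printf("text VMA: %llx-%llx (file offset %llx)\n",
		       (unsigned long long)q.vma_start,
		       (unsigned long long)q.vma_end,
		       (unsigned long long)q.vma_offset);
	else
		perror("PROCMAP_QUERY");	/* older kernels reject the ioctl */

	close(fd);
	return 0;
}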
......@@ -154,7 +154,7 @@ struct mount_attr {
*/
struct statmount {
__u32 size; /* Total size, including strings */
__u32 __spare1;
__u32 mnt_opts; /* [str] Mount options of the mount */
__u64 mask; /* What results were written */
__u32 sb_dev_major; /* Device ID */
__u32 sb_dev_minor;
......@@ -172,7 +172,8 @@ struct statmount {
__u64 propagate_from; /* Propagation from in current namespace */
__u32 mnt_root; /* [str] Root of mount relative to root of fs */
__u32 mnt_point; /* [str] Mountpoint relative to current root */
__u64 __spare2[50];
__u64 mnt_ns_id; /* ID of the mount namespace */
__u64 __spare2[49];
char str[]; /* Variable size part containing strings */
};
......@@ -188,10 +189,12 @@ struct mnt_id_req {
__u32 spare;
__u64 mnt_id;
__u64 param;
__u64 mnt_ns_id;
};
/* List of all mnt_id_req versions. */
#define MNT_ID_REQ_SIZE_VER0 24 /* sizeof first published struct */
#define MNT_ID_REQ_SIZE_VER1 32 /* sizeof second published struct */
/*
* @mask bits for statmount(2)
......@@ -202,10 +205,13 @@ struct mnt_id_req {
#define STATMOUNT_MNT_ROOT 0x00000008U /* Want/got mnt_root */
#define STATMOUNT_MNT_POINT 0x00000010U /* Want/got mnt_point */
#define STATMOUNT_FS_TYPE 0x00000020U /* Want/got fs_type */
#define STATMOUNT_MNT_NS_ID 0x00000040U /* Want/got mnt_ns_id */
#define STATMOUNT_MNT_OPTS 0x00000080U /* Want/got mnt_opts */
/*
* Special @mnt_id values that can be passed to listmount
*/
#define LSMT_ROOT 0xffffffffffffffff /* root mount */
#define LISTMOUNT_REVERSE (1 << 0) /* List later mounts first */
#endif /* _UAPI_LINUX_MOUNT_H */
......@@ -126,9 +126,15 @@ struct statx {
__u64 stx_mnt_id;
__u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
__u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
__u64 stx_subvol; /* Subvolume identifier */
/* 0xa0 */
__u64 __spare3[11]; /* Spare space for future expansion */
__u64 stx_subvol; /* Subvolume identifier */
__u32 stx_atomic_write_unit_min; /* Min atomic write unit in bytes */
__u32 stx_atomic_write_unit_max; /* Max atomic write unit in bytes */
/* 0xb0 */
__u32 stx_atomic_write_segments_max; /* Max atomic write segment count */
__u32 __spare1[1];
/* 0xb8 */
__u64 __spare3[9]; /* Spare space for future expansion */
/* 0x100 */
};
......@@ -157,6 +163,7 @@ struct statx {
#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
#define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */
#define STATX_WRITE_ATOMIC 0x00010000U /* Want/got atomic_write_* fields */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
......@@ -192,6 +199,7 @@ struct statx {
#define STATX_ATTR_MOUNT_ROOT 0x00002000 /* Root of a mount */
#define STATX_ATTR_VERITY 0x00100000 /* [I] Verity protected file */
#define STATX_ATTR_DAX 0x00200000 /* File is currently in DAX state */
#define STATX_ATTR_WRITE_ATOMIC 0x00400000 /* File supports atomic write operations */
#endif /* _UAPI_LINUX_STAT_H */
......@@ -142,7 +142,7 @@ struct snd_hwdep_dsp_image {
* *
*****************************************************************************/
#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 17)
#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 18)
typedef unsigned long snd_pcm_uframes_t;
typedef signed long snd_pcm_sframes_t;
......@@ -334,7 +334,7 @@ union snd_pcm_sync_id {
unsigned char id[16];
unsigned short id16[8];
unsigned int id32[4];
};
} __attribute__((deprecated));
struct snd_pcm_info {
unsigned int device; /* RO/WR (control): device number */
......@@ -348,7 +348,7 @@ struct snd_pcm_info {
int dev_subclass; /* SNDRV_PCM_SUBCLASS_* */
unsigned int subdevices_count;
unsigned int subdevices_avail;
union snd_pcm_sync_id sync; /* hardware synchronization ID */
unsigned char pad1[16]; /* was: hardware synchronization ID */
unsigned char reserved[64]; /* reserved for future... */
};
......@@ -420,7 +420,8 @@ struct snd_pcm_hw_params {
unsigned int rate_num; /* R: rate numerator */
unsigned int rate_den; /* R: rate denominator */
snd_pcm_uframes_t fifo_size; /* R: chip FIFO size in frames */
unsigned char reserved[64]; /* reserved for future */
unsigned char sync[16]; /* R: synchronization ID (perfect sync - one clock source) */
unsigned char reserved[48]; /* reserved for future */
};
enum {
......