Commit e06fdaf4 authored by Linus Torvalds

Merge tag 'gcc-plugins-v4.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull structure randomization updates from Kees Cook:
 "Now that IPC and other changes have landed, enable manual markings for
  randstruct plugin, including the task_struct.

  This is the rest of what was staged in -next for the gcc-plugins, and
  comes in three patches, largest first:

   - mark "easy" structs with __randomize_layout

   - mark task_struct with an optional anonymous struct to isolate the
     __randomize_layout section

   - mark structs to opt _out_ of automated marking (which will come
     later)

  And, FWIW, this continues to pass allmodconfig (normal and patched to
  enable gcc-plugins) builds of x86_64, i386, arm64, arm, powerpc, and
  s390 for me"

* tag 'gcc-plugins-v4.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  randstruct: opt-out externally exposed function pointer structs
  task_struct: Allow randomized layout
  randstruct: Mark various structs for randomization
parents a90c6ac2 8acdf505
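
For orientation, here is a minimal sketch of the three marking styles used throughout the diff below. The struct and member names are illustrative placeholders, not kernel structures touched by this merge:

/* Opt in: the randstruct plugin may reorder these members at build time. */
struct example_opt_in {
	unsigned long flags;
	void *private_data;
	int refcount;
} __randomize_layout;

/* Opt out: layout stays fixed, e.g. function-pointer structs exposed to
 * external ABIs or assembly. */
struct example_opt_out {
	int (*open)(void *ctx);
	void (*close)(void *ctx);
} __no_randomize_layout;

/* Partial opt in, as done for task_struct below: only the members between
 * the two markers are wrapped in an anonymous struct and randomized;
 * everything outside the markers keeps its position. */
struct example_partial {
	volatile long state;		/* stays first */
	randomized_struct_fields_start
	void *stack;
	unsigned int cookie;
	randomized_struct_fields_end
	unsigned long last_member;	/* stays last */
};
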
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
 void (*dma_unmap_area)(const void *, size_t, int);
 void (*dma_flush_range)(const void *, const void *);
-};
+} __no_randomize_layout;
 /*
 * Select the calling method
......
@@ -84,7 +84,7 @@ struct pv_init_ops {
 */
 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
 unsigned long addr, unsigned len);
-};
+} __no_randomize_layout;
 struct pv_lazy_ops {
@@ -92,12 +92,12 @@ struct pv_lazy_ops {
 void (*enter)(void);
 void (*leave)(void);
 void (*flush)(void);
-};
+} __no_randomize_layout;
 struct pv_time_ops {
 unsigned long long (*sched_clock)(void);
 unsigned long long (*steal_clock)(int cpu);
-};
+} __no_randomize_layout;
 struct pv_cpu_ops {
 /* hooks for various privileged instructions */
@@ -176,7 +176,7 @@ struct pv_cpu_ops {
 void (*start_context_switch)(struct task_struct *prev);
 void (*end_context_switch)(struct task_struct *next);
-};
+} __no_randomize_layout;
 struct pv_irq_ops {
 /*
@@ -199,7 +199,7 @@ struct pv_irq_ops {
 #ifdef CONFIG_X86_64
 void (*adjust_exception_frame)(void);
 #endif
-};
+} __no_randomize_layout;
 struct pv_mmu_ops {
 unsigned long (*read_cr2)(void);
@@ -305,7 +305,7 @@ struct pv_mmu_ops {
 an mfn. We can tell which is which from the index. */
 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
 phys_addr_t phys, pgprot_t flags);
-};
+} __no_randomize_layout;
 struct arch_spinlock;
 #ifdef CONFIG_SMP
@@ -322,7 +322,7 @@ struct pv_lock_ops {
 void (*kick)(int cpu);
 struct paravirt_callee_save vcpu_is_preempted;
-};
+} __no_randomize_layout;
 /* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
@@ -334,7 +334,7 @@ struct paravirt_patch_template {
 struct pv_irq_ops pv_irq_ops;
 struct pv_mmu_ops pv_mmu_ops;
 struct pv_lock_ops pv_lock_ops;
-};
+} __no_randomize_layout;
 extern struct pv_info pv_info;
 extern struct pv_init_ops pv_init_ops;
......
@@ -129,7 +129,7 @@ struct cpuinfo_x86 {
 /* Index into per_cpu list: */
 u16 cpu_index;
 u32 microcode;
-};
+} __randomize_layout;
 struct cpuid_regs {
 u32 eax, ebx, ecx, edx;
......
@@ -16,7 +16,7 @@ struct mnt_namespace {
 u64 event;
 unsigned int mounts; /* # of mounts in the namespace */
 unsigned int pending_mounts;
-};
+} __randomize_layout;
 struct mnt_pcp {
 int mnt_count;
@@ -69,7 +69,7 @@ struct mount {
 struct hlist_head mnt_pins;
 struct fs_pin mnt_umount;
 struct dentry *mnt_ex_mountpoint;
-};
+} __randomize_layout;
 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
......
@@ -524,7 +524,7 @@ struct nameidata {
 struct inode *link_inode;
 unsigned root_seq;
 int dfd;
-};
+} __randomize_layout;
 static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
 {
......
@@ -51,7 +51,7 @@ struct proc_dir_entry {
 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
 u8 namelen;
 char name[];
-};
+} __randomize_layout;
 union proc_op {
 int (*proc_get_link)(struct dentry *, struct path *);
@@ -70,7 +70,7 @@ struct proc_inode {
 struct hlist_node sysctl_inodes;
 const struct proc_ns_operations *ns_ops;
 struct inode vfs_inode;
-};
+} __randomize_layout;
 /*
 * General functions
@@ -279,7 +279,7 @@ struct proc_maps_private {
 #ifdef CONFIG_NUMA
 struct mempolicy *task_mempolicy;
 #endif
-};
+} __randomize_layout;
 struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode);
......
@@ -46,7 +46,7 @@ struct linux_binprm {
 unsigned interp_flags;
 unsigned interp_data;
 unsigned long loader, exec;
-};
+} __randomize_layout;
 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
@@ -81,7 +81,7 @@ struct linux_binfmt {
 int (*load_shlib)(struct file *);
 int (*core_dump)(struct coredump_params *cprm);
 unsigned long min_coredump; /* minimal dump size */
-};
+} __randomize_layout;
 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
......
@@ -17,7 +17,7 @@ struct cdev {
 struct list_head list;
 dev_t dev;
 unsigned int count;
-};
+} __randomize_layout;
 void cdev_init(struct cdev *, const struct file_operations *);
......
@@ -235,6 +235,7 @@
 #endif /* GCC_VERSION >= 40500 */
 #if GCC_VERSION >= 40600
 /*
 * When used with Link Time Optimization, gcc can optimize away C functions or
 * variables which are referenced only from assembly code. __visible tells the
@@ -242,7 +243,17 @@
 * this.
 */
 #define __visible __attribute__((externally_visible))
-#endif
+/*
+ * RANDSTRUCT_PLUGIN wants to use an anonymous struct, but it is only
+ * possible since GCC 4.6. To provide as much build testing coverage
+ * as possible, this is used for all GCC 4.6+ builds, and not just on
+ * RANDSTRUCT_PLUGIN builds.
+ */
+#define randomized_struct_fields_start struct {
+#define randomized_struct_fields_end } __randomize_layout;
+#endif /* GCC_VERSION >= 40600 */
 #if GCC_VERSION >= 40900 && !defined(__CHECKER__)
......
@@ -452,6 +452,11 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 # define __no_randomize_layout
 #endif
+#ifndef randomized_struct_fields_start
+# define randomized_struct_fields_start
+# define randomized_struct_fields_end
+#endif
 /*
 * Tell gcc if a function is cold. The compiler will assume any path
 * directly leading to the call is unlikely.
......
@@ -31,7 +31,7 @@ struct group_info {
 atomic_t usage;
 int ngroups;
 kgid_t gid[0];
-};
+} __randomize_layout;
 /**
 * get_group_info - Get a reference to a group info structure
@@ -145,7 +145,7 @@ struct cred {
 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
 struct group_info *group_info; /* supplementary groups for euid/fsgid */
 struct rcu_head rcu; /* RCU deletion hook */
-};
+} __randomize_layout;
 extern void __put_cred(struct cred *);
 extern void exit_creds(struct task_struct *);
......
@@ -118,7 +118,7 @@ struct dentry {
 struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */
 struct rcu_head d_rcu;
 } d_u;
-};
+} __randomize_layout;
 /*
 * dentry->d_lock spinlock nesting subclasses:
......
@@ -296,7 +296,7 @@ struct kiocb {
 void *private;
 int ki_flags;
 enum rw_hint ki_hint;
-};
+} __randomize_layout;
 static inline bool is_sync_kiocb(struct kiocb *kiocb)
 {
@@ -404,7 +404,7 @@ struct address_space {
 struct list_head private_list; /* ditto */
 void *private_data; /* ditto */
 errseq_t wb_err;
-} __attribute__((aligned(sizeof(long))));
+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
 /*
 * On most architectures that alignment is already the case; but
 * must be enforced here for CRIS, to let the least significant bit
@@ -447,7 +447,7 @@ struct block_device {
 int bd_fsfreeze_count;
 /* Mutex for freeze */
 struct mutex bd_fsfreeze_mutex;
-};
+} __randomize_layout;
 /*
 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
@@ -666,7 +666,7 @@ struct inode {
 #endif
 void *i_private; /* fs or device private pointer */
-};
+} __randomize_layout;
 static inline unsigned int i_blocksize(const struct inode *node)
 {
@@ -883,7 +883,8 @@ struct file {
 #endif /* #ifdef CONFIG_EPOLL */
 struct address_space *f_mapping;
 errseq_t f_wb_err;
-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
+} __randomize_layout
+ __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
 struct file_handle {
 __u32 handle_bytes;
@@ -1020,7 +1021,7 @@ struct file_lock {
 int state; /* state of grant or error if -ve */
 } afs;
 } fl_u;
-};
+} __randomize_layout;
 struct file_lock_context {
 spinlock_t flc_lock;
@@ -1412,7 +1413,7 @@ struct super_block {
 spinlock_t s_inode_wblist_lock;
 struct list_head s_inodes_wb; /* writeback inodes */
-};
+} __randomize_layout;
 /* Helper functions so that in most cases filesystems will
 * not need to deal directly with kuid_t and kgid_t and can
@@ -1698,7 +1699,7 @@ struct file_operations {
 u64);
 ssize_t (*dedupe_file_range)(struct file *, u64, u64, struct file *,
 u64);
-};
+} __randomize_layout;
 struct inode_operations {
 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
......
@@ -12,7 +12,7 @@ struct fs_struct {
 int umask;
 int in_exec;
 struct path root, pwd;
-};
+} __randomize_layout;
 extern struct kmem_cache *fs_cachep;
......
@@ -23,6 +23,6 @@ struct kern_ipc_perm {
 struct rcu_head rcu;
 atomic_t refcount;
-} ____cacheline_aligned_in_smp;
+} ____cacheline_aligned_in_smp __randomize_layout;
 #endif /* _LINUX_IPC_H */
@@ -61,7 +61,7 @@ struct ipc_namespace {
 struct ucounts *ucounts;
 struct ns_common ns;
-};
+} __randomize_layout;
 extern struct ipc_namespace init_ipc_ns;
 extern spinlock_t mq_lock;
......
@@ -45,7 +45,7 @@ struct key_preparsed_payload {
 size_t datalen; /* Raw datalen */
 size_t quotalen; /* Quota length for proposed payload */
 time_t expiry; /* Expiry time of key */
-};
+} __randomize_layout;
 typedef int (*request_key_actor_t)(struct key_construction *key,
 const char *op, void *aux);
@@ -158,7 +158,7 @@ struct key_type {
 /* internal fields */
 struct list_head link; /* link in types list */
 struct lock_class_key lock_class; /* key->sem lock class */
-};
+} __randomize_layout;
 extern struct key_type key_type_keyring;
......
@@ -64,7 +64,7 @@ struct subprocess_info {
 int (*init)(struct subprocess_info *info, struct cred *new);
 void (*cleanup)(struct subprocess_info *info);
 void *data;
-};
+} __randomize_layout;
 extern int
 call_usermodehelper(const char *path, char **argv, char **envp, int wait);
......
@@ -172,7 +172,7 @@ struct kset {
 spinlock_t list_lock;
 struct kobject kobj;
 const struct kset_uevent_ops *uevent_ops;
-};
+} __randomize_layout;
 extern void kset_init(struct kset *kset);
 extern int __must_check kset_register(struct kset *kset);
......
@@ -1912,7 +1912,7 @@ struct security_hook_heads {
 struct list_head audit_rule_match;
 struct list_head audit_rule_free;
 #endif /* CONFIG_AUDIT */
-};
+} __randomize_layout;
 /*
 * Security module hook list structure.
@@ -1923,7 +1923,7 @@ struct security_hook_list {
 struct list_head *head;
 union security_list_options hook;
 char *lsm;
-};
+} __randomize_layout;
 /*
 * Initializing a security_hook_list structure takes
......
@@ -342,7 +342,7 @@ struct vm_area_struct {
 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
 #endif
 struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
-};
+} __randomize_layout;
 struct core_thread {
 struct task_struct *task;
@@ -500,7 +500,7 @@ struct mm_struct {
 atomic_long_t hugetlb_usage;
 #endif
 struct work_struct async_put_work;
-};
+} __randomize_layout;
 extern struct mm_struct init_mm;
......
@@ -45,7 +45,7 @@ struct module_kobject {
 struct kobject *drivers_dir;
 struct module_param_attrs *mp;
 struct completion *kobj_completion;
-};
+} __randomize_layout;
 struct module_attribute {
 struct attribute attr;
@@ -475,7 +475,7 @@ struct module {
 ctor_fn_t *ctors;
 unsigned int num_ctors;
 #endif
-} ____cacheline_aligned;
+} ____cacheline_aligned __randomize_layout;
 #ifndef MODULE_ARCH_INIT
 #define MODULE_ARCH_INIT {}
 #endif
......
@@ -67,7 +67,7 @@ struct vfsmount {
 struct dentry *mnt_root; /* root of the mounted tree */
 struct super_block *mnt_sb; /* pointer to superblock */
 int mnt_flags;
-};
+} __randomize_layout;
 struct file; /* forward dec */
 struct path;
......
@@ -29,7 +29,7 @@ struct msg_queue {
 struct list_head q_messages;
 struct list_head q_receivers;
 struct list_head q_senders;
-};
+} __randomize_layout;
 /* Helper routines for sys_msgsnd and sys_msgrcv */
 extern long do_msgsnd(int msqid, long mtype, void __user *mtext,
......
@@ -7,7 +7,7 @@ struct vfsmount;
 struct path {
 struct vfsmount *mnt;
 struct dentry *dentry;
-};
+} __randomize_layout;
 extern void path_get(const struct path *);
 extern void path_put(const struct path *);
......
@@ -52,7 +52,7 @@ struct pid_namespace {
 int hide_pid;
 int reboot; /* group exit code if this pidns was rebooted */
 struct ns_common ns;
-};
+} __randomize_layout;
 extern struct pid_namespace init_pid_ns;
......
@@ -21,7 +21,7 @@ struct proc_ns_operations {
 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
 struct user_namespace *(*owner)(struct ns_common *ns);
 struct ns_common *(*get_parent)(struct ns_common *ns);
-};
+} __randomize_layout;
 extern const struct proc_ns_operations netns_operations;
 extern const struct proc_ns_operations utsns_operations;
......
@@ -426,7 +426,7 @@ struct sched_rt_entity {
 /* rq "owned" by this entity/group: */
 struct rt_rq *my_q;
 #endif
-};
+} __randomize_layout;
 struct sched_dl_entity {
 struct rb_node rb_node;
@@ -526,6 +526,13 @@ struct task_struct {
 #endif
 /* -1 unrunnable, 0 runnable, >0 stopped: */
 volatile long state;
+/*
+ * This begins the randomizable portion of task_struct. Only
+ * scheduling-critical items should be added above here.
+ */
+randomized_struct_fields_start
 void *stack;
 atomic_t usage;
 /* Per task flags (PF_*), defined further below: */
@@ -1079,6 +1086,13 @@ struct task_struct {
 /* Used by LSM modules for access restriction: */
 void *security;
 #endif
+/*
+ * New fields for task_struct should be added above here, so that
+ * they are included in the randomized portion of task_struct.
+ */
+randomized_struct_fields_end
 /* CPU-specific state of this task: */
 struct thread_struct thread;
......
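
Combined with the compiler-gcc.h and compiler.h hunks earlier in the diff, the task_struct markers above expand roughly as follows on GCC 4.6 and newer. This is a simplified sketch with placeholder members, not the literal preprocessed kernel source:

struct task_struct {
	volatile long state;		/* kept outside the randomized region */
	struct {			/* from randomized_struct_fields_start */
		void *stack;
		/* ... every member up to the end marker ... */
	} __randomize_layout;		/* from randomized_struct_fields_end */
	struct thread_struct thread;	/* kept last, outside the region */
};

On older compilers, or when the macros are not defined, both markers expand to nothing, so the members remain ordinary unrandomized fields and the struct stays source-compatible.
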
@@ -222,7 +222,7 @@ struct signal_struct {
 struct mutex cred_guard_mutex; /* guard against foreign influences on
 * credential calculations
 * (notably. ptrace) */
-};
+} __randomize_layout;
 /*
 * Bits in flags field of signal_struct.
......
@@ -41,7 +41,7 @@ struct sem_array {
 unsigned int use_global_lock;/* >0: global lock required */
 struct sem sems[];
-};
+} __randomize_layout;
 #ifdef CONFIG_SYSVIPC
......
@@ -22,7 +22,7 @@ struct shmid_kernel /* private to the kernel */
 /* The task created the shm object. NULL if the task is dead. */
 struct task_struct *shm_creator;
 struct list_head shm_clist; /* list by creator */
-};
+} __randomize_layout;
 /* shm_mode upper byte flags */
 #define SHM_DEST 01000 /* segment will be destroyed on last detach */
......
@@ -120,7 +120,7 @@ struct ctl_table
 struct ctl_table_poll *poll;
 void *extra1;
 void *extra2;
-};
+} __randomize_layout;
 struct ctl_node {
 struct rb_node node;
......
@@ -332,7 +332,7 @@ struct tty_struct {
 /* If the tty has a pending do_SAK, queue it here - akpm */
 struct work_struct SAK_work;
 struct tty_port *port;
-};
+} __randomize_layout;
 /* Each of a tty's open files has private_data pointing to tty_file_private */
 struct tty_file_private {
......
@@ -291,7 +291,7 @@ struct tty_operations {
 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
 #endif
 const struct file_operations *proc_fops;
-};
+} __randomize_layout;
 struct tty_driver {
 int magic; /* magic number for this structure */
@@ -325,7 +325,7 @@ struct tty_driver {
 const struct tty_operations *ops;
 struct list_head tty_drivers;
-};
+} __randomize_layout;
 extern struct list_head tty_drivers;
......
@@ -66,7 +66,7 @@ struct user_namespace {
 #endif
 struct ucounts *ucounts;
 int ucount_max[UCOUNT_COUNTS];
-};
+} __randomize_layout;
 struct ucounts {
 struct hlist_node node;
......
@@ -26,7 +26,7 @@ struct uts_namespace {
 struct user_namespace *user_ns;
 struct ucounts *ucounts;
 struct ns_common ns;
-};
+} __randomize_layout;
 extern struct uts_namespace init_uts_ns;
 #ifdef CONFIG_UTS_NS
......
@@ -37,7 +37,7 @@ struct unix_skb_parms {
 u32 secid; /* Security ID */
 #endif
 u32 consumed;
-};
+} __randomize_layout;
 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
......
@@ -156,7 +156,7 @@ struct neighbour {
 struct rcu_head rcu;
 struct net_device *dev;
 u8 primary_key[0];
-};
+} __randomize_layout;
 struct neigh_ops {
 int family;
......
@@ -148,7 +148,7 @@ struct net {
 #endif
 struct sock *diag_nlsk;
 atomic_t fnhe_genid;
-};
+} __randomize_layout;
 #include <linux/seq_file_net.h>
......
@@ -1128,7 +1128,7 @@ struct proto {
 atomic_t socks;
 #endif
 int (*diag_destroy)(struct sock *sk, int err);
-};
+} __randomize_layout;
 int proto_register(struct proto *prot, int alloc_slab);
 void proto_unregister(struct proto *prot);
......
@@ -212,7 +212,7 @@ struct futex_pi_state {
 atomic_t refcount;
 union futex_key key;
-};
+} __randomize_layout;
 /**
 * struct futex_q - The hashed futex queue entry, one per waiting task
@@ -246,7 +246,7 @@ struct futex_q {
 struct rt_mutex_waiter *rt_waiter;
 union futex_key *requeue_pi_key;
 u32 bitset;
-};
+} __randomize_layout;
 static const struct futex_q futex_q_init = {
 /* list gets initialized in queue_me()*/
......
@@ -198,7 +198,7 @@ struct request_key_auth {
 void *callout_info;
 size_t callout_len;
 pid_t pid;
-};
+} __randomize_layout;
 extern struct key_type key_type_request_key_auth;
 extern struct key *request_key_auth_new(struct key *target,
......