Commit b07039b7 authored by Linus Torvalds

Merge tag 'driver-core-4.21-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core

Pull driver core updates from Greg KH:
 "Here is the "big" set of driver core patches for 4.21-rc1.

  It's not really big, just a number of small changes for some reported
  issues, some documentation updates to hopefully make it harder for
  people to abuse the driver model, and some other minor cleanups.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'driver-core-4.21-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core:
  mm, memory_hotplug: update a comment in unregister_memory()
  component: convert to DEFINE_SHOW_ATTRIBUTE
  sysfs: Disable lockdep for driver bind/unbind files
  driver core: Add missing dev->bus->need_parent_lock checks
  kobject: return error code if writing /sys/.../uevent fails
  driver core: Move async_synchronize_full call
  driver core: platform: Respect return code of platform_device_register_full()
  kref/kobject: Improve documentation
  drivers/base/memory.c: Use DEVICE_ATTR_RO and friends
  driver core: Replace simple_strto{l,ul} by kstrtou{l,ul}
  kernfs: Improve kernfs_notify() poll notification latency
  kobject: Fix warnings in lib/kobject_uevent.c
  kobject: drop unnecessary cast "%llu" for u64
  driver core: fix comments for device_block_probing()
  driver core: Replace simple_strtol by kstrtoint
parents 02061181 16df1456
......@@ -279,10 +279,14 @@ such a method has a form like::
One important point cannot be overstated: every kobject must have a
release() method, and the kobject must persist (in a consistent state)
until that method is called. If these constraints are not met, the code is
flawed. Note that the kernel will warn you if you forget to provide a
release() method. Do not try to get rid of this warning by providing an
"empty" release function; you will be mocked mercilessly by the kobject
maintainer if you attempt this.
"empty" release function.
If all your cleanup function needs to do is call kfree(), then you must
create a wrapper function which uses container_of() to upcast to the correct
type (as shown in the example above) and then calls kfree() on the overall
structure.
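
For readers skimming this hunk, a minimal wrapper of the kind described above might look like the following; this is an illustrative sketch only (the structure and member names are invented for the example, they are not part of the patch)::

    struct my_widget {
            struct kobject kobj;
            /* ... driver-private state ... */
    };

    static void my_widget_release(struct kobject *kobj)
    {
            struct my_widget *w = container_of(kobj, struct my_widget, kobj);

            kfree(w);       /* frees the whole containing structure */
    }
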
Note, the name of the kobject is available in the release function, but it
must NOT be changed within this callback. Otherwise there will be a memory
......
......@@ -31,6 +31,9 @@ static struct kset *system_kset;
#define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
struct driver_attribute driver_attr_##_name = \
__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
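
For context, __ATTR_IGNORE_LOCKDEP() comes from include/linux/sysfs.h; from memory it expands to roughly the sketch below, and only a CONFIG_DEBUG_LOCK_ALLOC build actually sets the extra flag (otherwise it falls back to plain __ATTR):

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) {           \
        .attr = { .name = __stringify(_name), .mode = _mode,           \
                  .ignore_lockdep = true },                            \
        .show   = _show,                                               \
        .store  = _store,                                              \
}
#else
#define __ATTR_IGNORE_LOCKDEP  __ATTR
#endif

As the patch subject ("sysfs: Disable lockdep for driver bind/unbind files") indicates, switching bind/unbind over to this macro stops lockdep from tracking those two attributes, silencing false-positive reports triggered by writes to them.
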
static int __must_check bus_rescan_devices_helper(struct device *dev,
void *data);
......@@ -195,7 +198,7 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf,
bus_put(bus);
return err;
}
static DRIVER_ATTR_WO(unbind);
static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store);
/*
* Manually attach a device to a driver.
......@@ -231,7 +234,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
bus_put(bus);
return err;
}
static DRIVER_ATTR_WO(bind);
static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store);
static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
{
......@@ -611,8 +614,10 @@ static void remove_probe_files(struct bus_type *bus)
static ssize_t uevent_store(struct device_driver *drv, const char *buf,
size_t count)
{
kobject_synth_uevent(&drv->p->kobj, buf, count);
return count;
int rc;
rc = kobject_synth_uevent(&drv->p->kobj, buf, count);
return rc ? rc : count;
}
static DRIVER_ATTR_WO(uevent);
......@@ -828,8 +833,10 @@ static void klist_devices_put(struct klist_node *n)
static ssize_t bus_uevent_store(struct bus_type *bus,
const char *buf, size_t count)
{
kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
return count;
int rc;
rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
return rc ? rc : count;
}
static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
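
The visible effect of propagating the kobject_synth_uevent() return code (here and in the driver and module counterparts) is that userspace now learns when a synthetic uevent is rejected. A hedged illustration; the sysfs path is only an example and the program assumes root:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* any bus/driver uevent file would do; this one is illustrative */
        int fd = open("/sys/bus/platform/uevent", O_WRONLY);

        if (fd < 0)
                return 1;
        /* an unknown action now fails the write (typically EINVAL)
         * instead of silently reporting success */
        if (write(fd, "not-an-action", strlen("not-an-action")) < 0)
                perror("write");
        close(fd);
        return 0;
}
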
......
......@@ -85,17 +85,7 @@ static int component_devices_show(struct seq_file *s, void *data)
return 0;
}
static int component_devices_open(struct inode *inode, struct file *file)
{
return single_open(file, component_devices_show, inode->i_private);
}
static const struct file_operations component_devices_fops = {
.open = component_devices_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
DEFINE_SHOW_ATTRIBUTE(component_devices);
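
DEFINE_SHOW_ATTRIBUTE() lives in include/linux/seq_file.h and generates the boilerplate deleted above; from memory it expands to roughly:

static int component_devices_open(struct inode *inode, struct file *file)
{
        return single_open(file, component_devices_show, inode->i_private);
}

static const struct file_operations component_devices_fops = {
        .owner          = THIS_MODULE,
        .open           = component_devices_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

i.e. the same open/read/llseek/release wiring as before, with an .owner field added for free.
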
static int __init component_debug_init(void)
{
......
......@@ -815,10 +815,12 @@ ssize_t device_store_ulong(struct device *dev,
const char *buf, size_t size)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
char *end;
unsigned long new = simple_strtoul(buf, &end, 0);
if (end == buf)
return -EINVAL;
int ret;
unsigned long new;
ret = kstrtoul(buf, 0, &new);
if (ret)
return ret;
*(unsigned long *)(ea->var) = new;
/* Always return full write size even if we didn't consume all */
return size;
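
Worth noting about this conversion (a general property of the kstrto* helpers rather than anything specific to this hunk): kstrtoul() rejects trailing garbage that simple_strtoul() would silently ignore, and it reports overflow. An illustrative fragment, not taken from the patch:

        unsigned long v;
        int err;

        err = kstrtoul("42abc", 0, &v); /* -EINVAL: trailing junk rejected, v untouched */
        err = kstrtoul("42\n", 0, &v);  /* 0: one trailing newline is tolerated, v == 42 */
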
......@@ -839,9 +841,14 @@ ssize_t device_store_int(struct device *dev,
const char *buf, size_t size)
{
struct dev_ext_attribute *ea = to_ext_attr(attr);
char *end;
long new = simple_strtol(buf, &end, 0);
if (end == buf || new > INT_MAX || new < INT_MIN)
int ret;
long new;
ret = kstrtol(buf, 0, &new);
if (ret)
return ret;
if (new > INT_MAX || new < INT_MIN)
return -EINVAL;
*(int *)(ea->var) = new;
/* Always return full write size even if we didn't consume all */
......@@ -911,8 +918,7 @@ static void device_release(struct kobject *kobj)
else if (dev->class && dev->class->dev_release)
dev->class->dev_release(dev);
else
WARN(1, KERN_ERR "Device '%s' does not have a release() "
"function, it is broken and must be fixed.\n",
WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/kobject.txt.\n",
dev_name(dev));
kfree(p);
}
......@@ -1088,8 +1094,14 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
if (kobject_synth_uevent(&dev->kobj, buf, count))
int rc;
rc = kobject_synth_uevent(&dev->kobj, buf, count);
if (rc) {
dev_err(dev, "uevent: failed to send synthetic uevent\n");
return rc;
}
return count;
}
......
......@@ -179,7 +179,7 @@ static void driver_deferred_probe_trigger(void)
}
/**
* device_block_probing() - Block/defere device's probes
* device_block_probing() - Block/defer device's probes
*
* It will disable probing of devices and defer their probes instead.
*/
......@@ -223,7 +223,10 @@ DEFINE_SHOW_ATTRIBUTE(deferred_devs);
static int deferred_probe_timeout = -1;
static int __init deferred_probe_timeout_setup(char *str)
{
deferred_probe_timeout = simple_strtol(str, NULL, 10);
int timeout;
if (!kstrtoint(str, 10, &timeout))
deferred_probe_timeout = timeout;
return 1;
}
__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
......@@ -453,7 +456,7 @@ static int really_probe(struct device *dev, struct device_driver *drv)
if (defer_all_probes) {
/*
* Value of defer_all_probes can be set only by
* device_defer_all_probes_enable() which, in turn, will call
* device_block_probing() which, in turn, will call
* wait_for_device_probe() right after that to avoid any races.
*/
dev_dbg(dev, "Driver %s force probe deferral\n", drv->name);
......@@ -928,16 +931,13 @@ static void __device_release_driver(struct device *dev, struct device *parent)
drv = dev->driver;
if (drv) {
if (driver_allows_async_probing(drv))
async_synchronize_full();
while (device_links_busy(dev)) {
device_unlock(dev);
if (parent)
if (parent && dev->bus->need_parent_lock)
device_unlock(parent);
device_links_unbind_consumers(dev);
if (parent)
if (parent && dev->bus->need_parent_lock)
device_lock(parent);
device_lock(dev);
......@@ -1036,6 +1036,9 @@ void driver_detach(struct device_driver *drv)
struct device_private *dev_prv;
struct device *dev;
if (driver_allows_async_probing(drv))
async_synchronize_full();
for (;;) {
spin_lock(&drv->p->klist_devices.k_lock);
if (list_empty(&drv->p->klist_devices.k_list)) {
......
......@@ -109,8 +109,8 @@ static unsigned long get_memory_block_size(void)
* uses.
*/
static ssize_t show_mem_start_phys_index(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t phys_index_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct memory_block *mem = to_memory_block(dev);
unsigned long phys_index;
......@@ -122,8 +122,8 @@ static ssize_t show_mem_start_phys_index(struct device *dev,
/*
* Show whether the section of memory is likely to be hot-removable
*/
static ssize_t show_mem_removable(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
unsigned long i, pfn;
int ret = 1;
......@@ -146,8 +146,8 @@ static ssize_t show_mem_removable(struct device *dev,
/*
* online, offline, going offline, etc.
*/
static ssize_t show_mem_state(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct memory_block *mem = to_memory_block(dev);
ssize_t len = 0;
......@@ -286,7 +286,7 @@ static int memory_subsys_online(struct device *dev)
return 0;
/*
* If we are called from store_mem_state(), online_type will be
* If we are called from state_store(), online_type will be
* set >= 0 Otherwise we were called from the device online
* attribute and need to set the online_type.
*/
......@@ -315,9 +315,8 @@ static int memory_subsys_offline(struct device *dev)
return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}
static ssize_t
store_mem_state(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct memory_block *mem = to_memory_block(dev);
int ret, online_type;
......@@ -374,7 +373,7 @@ store_mem_state(struct device *dev,
* s.t. if I offline all of these sections I can then
* remove the physical device?
*/
static ssize_t show_phys_device(struct device *dev,
static ssize_t phys_device_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct memory_block *mem = to_memory_block(dev);
......@@ -395,7 +394,7 @@ static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn,
}
}
static ssize_t show_valid_zones(struct device *dev,
static ssize_t valid_zones_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct memory_block *mem = to_memory_block(dev);
......@@ -435,33 +434,31 @@ static ssize_t show_valid_zones(struct device *dev,
return strlen(buf);
}
static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL);
static DEVICE_ATTR_RO(valid_zones);
#endif
static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);
static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);
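
The wholesale renames in this file follow from how the DEVICE_ATTR_RO/RW/WO helpers derive callback names from the attribute name; a sketch of the include/linux/device.h definitions, from memory:

#define DEVICE_ATTR_RO(_name) \
        struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
#define DEVICE_ATTR_RW(_name) \
        struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
#define DEVICE_ATTR_WO(_name) \
        struct device_attribute dev_attr_##_name = __ATTR_WO(_name)

__ATTR_RO(foo) wires .show = foo_show with mode 0444, __ATTR_WO(foo) wires .store = foo_store with mode 0200, and __ATTR_RW(foo) wires both with mode 0644 -- which is why show_mem_state()/store_mem_state() and friends had to become state_show()/state_store() and so on.
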
/*
* Block size attribute stuff
*/
static ssize_t
print_block_size(struct device *dev, struct device_attribute *attr,
char *buf)
static ssize_t block_size_bytes_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%lx\n", get_memory_block_size());
}
static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);
static DEVICE_ATTR_RO(block_size_bytes);
/*
* Memory auto online policy.
*/
static ssize_t
show_auto_online_blocks(struct device *dev, struct device_attribute *attr,
char *buf)
static ssize_t auto_online_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (memhp_auto_online)
return sprintf(buf, "online\n");
......@@ -469,9 +466,9 @@ show_auto_online_blocks(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "offline\n");
}
static ssize_t
store_auto_online_blocks(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
static ssize_t auto_online_blocks_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
if (sysfs_streq(buf, "online"))
memhp_auto_online = true;
......@@ -483,8 +480,7 @@ store_auto_online_blocks(struct device *dev, struct device_attribute *attr,
return count;
}
static DEVICE_ATTR(auto_online_blocks, 0644, show_auto_online_blocks,
store_auto_online_blocks);
static DEVICE_ATTR_RW(auto_online_blocks);
/*
* Some architectures will have custom drivers to do this, and
......@@ -493,9 +489,8 @@ static DEVICE_ATTR(auto_online_blocks, 0644, show_auto_online_blocks,
* and will require this interface.
*/
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t
memory_probe_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u64 phys_addr;
int nid, ret;
......@@ -525,7 +520,7 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
return ret;
}
static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
static DEVICE_ATTR_WO(probe);
#endif
#ifdef CONFIG_MEMORY_FAILURE
......@@ -534,10 +529,9 @@ static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
*/
/* Soft offline a page */
static ssize_t
store_soft_offline_page(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
static ssize_t soft_offline_page_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
u64 pfn;
......@@ -553,10 +547,9 @@ store_soft_offline_page(struct device *dev,
}
/* Forcibly offline a page, including killing processes. */
static ssize_t
store_hard_offline_page(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
static ssize_t hard_offline_page_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
u64 pfn;
......@@ -569,8 +562,8 @@ store_hard_offline_page(struct device *dev,
return ret ? ret : count;
}
static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page);
static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page);
static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif
/*
......@@ -739,7 +732,7 @@ unregister_memory(struct memory_block *memory)
{
BUG_ON(memory->dev.bus != &memory_subsys);
/* drop the ref. we got in remove_memory_block() */
/* drop the ref. we got in remove_memory_section() */
put_device(&memory->dev);
device_unregister(&memory->dev);
}
......
......@@ -234,7 +234,7 @@ struct platform_object {
*/
void platform_device_put(struct platform_device *pdev)
{
if (pdev)
if (!IS_ERR_OR_NULL(pdev))
put_device(&pdev->dev);
}
EXPORT_SYMBOL_GPL(platform_device_put);
......@@ -447,7 +447,7 @@ void platform_device_del(struct platform_device *pdev)
{
int i;
if (pdev) {
if (!IS_ERR_OR_NULL(pdev)) {
device_del(&pdev->dev);
if (pdev->id_auto) {
......
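
For context on the IS_ERR_OR_NULL() guards above: platform_device_register_full() reports failure with an ERR_PTR()-encoded value rather than NULL, and some callers stash that result and tear it down unconditionally later. An illustrative caller pattern (priv->child and pdevinfo are invented names for the example):

        /* registration may fail; the result is stored either way */
        priv->child = platform_device_register_full(&pdevinfo);

        /* ... later, in a common cleanup path, with no IS_ERR() check ... */
        platform_device_unregister(priv->child);

With the two hunks above, the put/del performed by platform_device_unregister() become no-ops for an ERR_PTR() value instead of dereferencing a poisoned pointer.
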
......@@ -857,7 +857,6 @@ static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
static void kernfs_notify_workfn(struct work_struct *work)
{
struct kernfs_node *kn;
struct kernfs_open_node *on;
struct kernfs_super_info *info;
repeat:
/* pop one off the notify_list */
......@@ -871,17 +870,6 @@ static void kernfs_notify_workfn(struct work_struct *work)
kn->attr.notify_next = NULL;
spin_unlock_irq(&kernfs_notify_lock);
/* kick poll */
spin_lock_irq(&kernfs_open_node_lock);
on = kn->attr.open;
if (on) {
atomic_inc(&on->event);
wake_up_interruptible(&on->poll);
}
spin_unlock_irq(&kernfs_open_node_lock);
/* kick fsnotify */
mutex_lock(&kernfs_mutex);
......@@ -934,10 +922,21 @@ void kernfs_notify(struct kernfs_node *kn)
{
static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
unsigned long flags;
struct kernfs_open_node *on;
if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
return;
/* kick poll immediately */
spin_lock_irqsave(&kernfs_open_node_lock, flags);
on = kn->attr.open;
if (on) {
atomic_inc(&on->event);
wake_up_interruptible(&on->poll);
}
spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
/* schedule work to kick fsnotify */
spin_lock_irqsave(&kernfs_notify_lock, flags);
if (!kn->attr.notify_next) {
kernfs_get(kn);
......
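
The effect of this reshuffle is that pollers are woken directly from kernfs_notify() instead of waiting for the work item, which now only handles the fsnotify side (that part stays deferred because it sleeps on kernfs_mutex, as visible above). The consumer-facing path that benefits is the usual pollable-attribute pattern; a sketch with an invented attribute name:

        /* after updating the value backing a pollable sysfs file, wake any
         * poll()/select() waiters; with this change the wakeup is immediate
         * rather than deferred to a workqueue */
        sysfs_notify(&dev->kobj, NULL, "state");
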
......@@ -53,10 +53,7 @@ static inline void kref_get(struct kref *kref)
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
* in as this function. If the caller does pass kfree to this
* function, you will be publicly mocked mercilessly by the kref
* maintainer, and anyone else who happens to notice it. You have
* been warned.
* in as this function.
*
* Decrement the refcount, and if 0, call release().
* Return 1 if the object was removed, otherwise return 0. Beware, if this
......
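
The guidance that survives the trim is the same as in the kobject.txt hunk earlier: the release callback should be a real function that frees the containing object. A minimal kref-flavoured sketch (type and member names invented for the example):

struct my_obj {
        struct kref ref;
        /* ... */
};

static void my_obj_release(struct kref *kref)
{
        struct my_obj *obj = container_of(kref, struct my_obj, ref);

        kfree(obj);
}

        /* a holder drops its reference with: */
        kref_put(&obj->ref, my_obj_release);
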
......@@ -1208,8 +1208,10 @@ static ssize_t store_uevent(struct module_attribute *mattr,
struct module_kobject *mk,
const char *buffer, size_t count)
{
kobject_synth_uevent(&mk->kobj, buffer, count);
return count;
int rc;
rc = kobject_synth_uevent(&mk->kobj, buffer, count);
return rc ? rc : count;
}
struct module_attribute module_uevent =
......
......@@ -639,7 +639,7 @@ static void kobject_cleanup(struct kobject *kobj)
kobject_name(kobj), kobj, __func__, kobj->parent);
if (t && !t->release)
pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed.\n",
pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/kobject.txt.\n",
kobject_name(kobj), kobj);
/* send "remove" if the caller did not do it but sent "add" */
......
......@@ -240,6 +240,7 @@ static int kobj_usermode_filter(struct kobject *kobj)
ops = kobj_ns_ops(kobj);
if (ops) {
const void *init_ns, *ns;
ns = kobj->ktype->namespace(kobj);
init_ns = ops->initial_ns();
return ns != init_ns;
......@@ -390,6 +391,7 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj,
ops = kobj_ns_ops(kobj);
if (!ops && kobj->kset) {
struct kobject *ksobj = &kobj->kset->kobj;
if (ksobj->parent != NULL)
ops = kobj_ns_ops(ksobj->parent);
}
......@@ -579,7 +581,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
mutex_lock(&uevent_sock_mutex);
/* we will send an event, so request a new sequence number */
retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum);
retval = add_uevent_var(env, "SEQNUM=%llu", ++uevent_seqnum);
if (retval) {
mutex_unlock(&uevent_sock_mutex);
goto exit;
......