Commit 75021d28 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial

Pull trivial updates from Jiri Kosina:
 "Trivial stuff from trivial tree that can be trivially summed up as:

   - treewide drop of spurious unlikely() before IS_ERR() from Viresh
     Kumar

   - cosmetic fixes (that don't really affect basic functionality of the
     driver) for pktcdvd and bcache, from Julia Lawall and Petr Mladek

   - various comment / printk fixes and updates all over the place"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial:
  bcache: Really show state of work pending bit
  hwmon: applesmc: fix comment typos
  Kconfig: remove comment about scsi_wait_scan module
  class_find_device: fix reference to argument "match"
  debugfs: document that debugfs_remove*() accepts NULL and error values
  net: Drop unlikely before IS_ERR(_OR_NULL)
  mm: Drop unlikely before IS_ERR(_OR_NULL)
  fs: Drop unlikely before IS_ERR(_OR_NULL)
  drivers: net: Drop unlikely before IS_ERR(_OR_NULL)
  drivers: misc: Drop unlikely before IS_ERR(_OR_NULL)
  UBI: Update comments to reflect UBI_METAONLY flag
  pktcdvd: drop null test before destroy functions
parents 6f1da317 8d090f47
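
Most of the hunks below drop an unlikely() wrapper around IS_ERR() / IS_ERR_OR_NULL(). The wrapper is redundant because the IS_ERR() helpers already carry the branch hint internally (via IS_ERR_VALUE()), so repeating it at the call site gains nothing. The following is a minimal, self-contained userspace sketch of that layering; the names and the MAX_ERRNO constant only loosely mirror include/linux/err.h and are simplified for illustration:

#include <stdio.h>

#define MAX_ERRNO       4095
#define unlikely(x)     __builtin_expect(!!(x), 0)
/* The hint lives inside the helper, not at the call site. */
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        return IS_ERR_VALUE((unsigned long)ptr);
}

int main(void)
{
        int x = 0;
        void *ok = &x;
        void *bad = ERR_PTR(-12);       /* e.g. -ENOMEM */

        /* Plain if (IS_ERR(p)) is enough; unlikely(IS_ERR(p)) adds nothing. */
        if (IS_ERR(bad))
                printf("bad: error %ld\n", PTR_ERR(bad));
        if (!IS_ERR(ok))
                printf("ok: valid pointer\n");
        return 0;
}
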
@@ -406,7 +406,7 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
  *
  * Note, you will need to drop the reference with put_device() after use.
  *
- * @fn is allowed to do anything including calling back into class
+ * @match is allowed to do anything including calling back into class
  * code. There's no locking restriction.
  */
 struct device *class_find_device(struct class *class, struct device *start,
...
@@ -2803,8 +2803,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
 out_mem2:
         put_disk(disk);
 out_mem:
-        if (pd->rb_pool)
-                mempool_destroy(pd->rb_pool);
+        mempool_destroy(pd->rb_pool);
         kfree(pd);
 out_mutex:
         mutex_unlock(&ctl_mutex);
...
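
A note on the pktcdvd hunk above: the NULL test could be dropped because mempool_destroy(), like kfree(), is expected to do nothing when handed a NULL pointer, so callers need no guard. A rough, purely illustrative userspace sketch of that calling convention (mempool_destroy_like() and its struct are invented here, not the kernel API):

#include <stdlib.h>

struct mempool {
        void *elements;
};

/* NULL-tolerant destructor: callers may pass NULL without a guard. */
static void mempool_destroy_like(struct mempool *pool)
{
        if (!pool)
                return;
        free(pool->elements);
        free(pool);
}

int main(void)
{
        struct mempool *pool = NULL;

        /* Safe even though pool was never allocated. */
        mempool_destroy_like(pool);

        pool = calloc(1, sizeof(*pool));
        if (pool)
                pool->elements = malloc(64);
        mempool_destroy_like(pool);
        return 0;
}
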
@@ -1138,7 +1138,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
         return ret;
 }
 
-/* Create accelerometer ressources */
+/* Create accelerometer resources */
 static int applesmc_create_accelerometer(void)
 {
         struct input_dev *idev;
@@ -1191,7 +1191,7 @@ static int applesmc_create_accelerometer(void)
         return ret;
 }
 
-/* Release all ressources used by the accelerometer */
+/* Release all resources used by the accelerometer */
 static void applesmc_release_accelerometer(void)
 {
         if (!smcreg.has_accelerometer)
...
@@ -167,8 +167,6 @@ EXPORT_SYMBOL(closure_debug_destroy);
 
 static struct dentry *debug;
 
-#define work_data_bits(work) ((unsigned long *)(&(work)->data))
-
 static int debug_seq_show(struct seq_file *f, void *data)
 {
         struct closure *cl;
@@ -182,7 +180,7 @@ static int debug_seq_show(struct seq_file *f, void *data)
                            r & CLOSURE_REMAINING_MASK);
 
                 seq_printf(f, "%s%s%s%s\n",
-                           test_bit(WORK_STRUCT_PENDING,
+                           test_bit(WORK_STRUCT_PENDING_BIT,
                                     work_data_bits(&cl->work)) ? "Q" : "",
                            r & CLOSURE_RUNNING ? "R" : "",
                            r & CLOSURE_STACK ? "S" : "",
...
@@ -926,7 +926,7 @@ struct c2port_device *c2port_device_register(char *name,
         c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
                                    "c2port%d", c2dev->id);
-        if (unlikely(IS_ERR(c2dev->dev))) {
+        if (IS_ERR(c2dev->dev)) {
                 ret = PTR_ERR(c2dev->dev);
                 goto error_device_create;
         }
...
@@ -112,8 +112,8 @@ static int gluebi_get_device(struct mtd_info *mtd)
                  * The MTD device is already referenced and this is just one
                  * more reference. MTD allows many users to open the same
                  * volume simultaneously and do not distinguish between
-                 * readers/writers/exclusive openers as UBI does. So we do not
-                 * open the UBI volume again - just increase the reference
+                 * readers/writers/exclusive/meta openers as UBI does. So we do
+                 * not open the UBI volume again - just increase the reference
                  * counter and return.
                  */
                 gluebi->refcnt += 1;
...
@@ -1036,7 +1036,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
         }
 
         desc = knav_pool_desc_get(netcp->tx_pool);
-        if (unlikely(IS_ERR_OR_NULL(desc))) {
+        if (IS_ERR_OR_NULL(desc)) {
                 dev_err(netcp->ndev_dev, "out of TX desc\n");
                 dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
                 return NULL;
@@ -1069,7 +1069,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
                 }
 
                 ndesc = knav_pool_desc_get(netcp->tx_pool);
-                if (unlikely(IS_ERR_OR_NULL(ndesc))) {
+                if (IS_ERR_OR_NULL(ndesc)) {
                         dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
                         dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
                         goto free_descs;
...
@@ -242,13 +242,6 @@ config SCSI_SCAN_ASYNC
           system continues booting, and even probe devices on different
           busses in parallel, leading to a significant speed-up.
 
-          If you have built SCSI as modules, enabling this option can
-          be a problem as the devices may not have been found by the
-          time your system expects them to have been. You can load the
-          scsi_wait_scan module to ensure that all scans have completed.
-          If you build your SCSI drivers into the kernel, then everything
-          will work fine if you say Y here.
-
           You can override this choice by specifying "scsi_mod.scan=sync"
           or async on the kernel's command line.
...
@@ -84,7 +84,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
         cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
 
         dentry = d_hash_and_lookup(parent, name);
-        if (unlikely(IS_ERR(dentry)))
+        if (IS_ERR(dentry))
                 return;
 
         if (dentry) {
...
@@ -270,7 +270,7 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
         ecryptfs_inode = ecryptfs_do_create(directory_inode, ecryptfs_dentry,
                                             mode);
-        if (unlikely(IS_ERR(ecryptfs_inode))) {
+        if (IS_ERR(ecryptfs_inode)) {
                 ecryptfs_printk(KERN_WARNING, "Failed to create file in"
                                 "lower filesystem\n");
                 rc = PTR_ERR(ecryptfs_inode);
...
@@ -900,7 +900,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
                 bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
                                             flags);
-                if (unlikely(IS_ERR(bh))) {
+                if (IS_ERR(bh)) {
                         ret = PTR_ERR(bh);
                         goto err;
                 }
@@ -5796,7 +5796,7 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
                 int split = 0;
 
                 path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
-                if (unlikely(IS_ERR(path1))) {
+                if (IS_ERR(path1)) {
                         *erp = PTR_ERR(path1);
                         path1 = NULL;
                 finish:
@@ -5804,7 +5804,7 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1,
                         goto repeat;
                 }
                 path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
-                if (unlikely(IS_ERR(path2))) {
+                if (IS_ERR(path2)) {
                         *erp = PTR_ERR(path2);
                         path2 = NULL;
                         goto finish;
...
@@ -1429,7 +1429,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
                                 }
                                 num++;
                                 bh = ext4_getblk(NULL, dir, b++, 0);
-                                if (unlikely(IS_ERR(bh))) {
+                                if (IS_ERR(bh)) {
                                         if (ra_max == 0) {
                                                 ret = bh;
                                                 goto cleanup_and_exit;
...
@@ -1966,7 +1966,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
                 if (err) {
                         const char *s = get_link(nd);
 
-                        if (unlikely(IS_ERR(s)))
+                        if (IS_ERR(s))
                                 return PTR_ERR(s);
                         err = 0;
                         if (unlikely(!s)) {
@@ -3380,7 +3380,7 @@ struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
                 return ERR_PTR(-ELOOP);
 
         filename = getname_kernel(name);
-        if (unlikely(IS_ERR(filename)))
+        if (IS_ERR(filename))
                 return ERR_CAST(filename);
         set_nameidata(&nd, -1, filename);
...
@@ -597,7 +597,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
         qname.name = __name;
 
         newdent = d_hash_and_lookup(dentry, &qname);
-        if (unlikely(IS_ERR(newdent)))
+        if (IS_ERR(newdent))
                 goto end_advance;
         if (!newdent) {
                 newdent = d_alloc(dentry, &qname);
...
@@ -124,7 +124,7 @@ objio_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
 retry_lookup:
         od = osduld_info_lookup(&odi);
-        if (unlikely(IS_ERR(od))) {
+        if (IS_ERR(od)) {
                 err = PTR_ERR(od);
                 dprintk("%s: osduld_info_lookup => %d\n", __func__, err);
                 if (err == -ENODEV && retry_flag) {
...
@@ -948,7 +948,7 @@ static struct ctl_dir *get_subdir(struct ctl_dir *dir,
 found:
         subdir->header.nreg++;
 failed:
-        if (unlikely(IS_ERR(subdir))) {
+        if (IS_ERR(subdir)) {
                 pr_err("sysctl could not get directory: ");
                 sysctl_print_dir(dir);
                 pr_cont("/%*.*s %ld\n",
...
@@ -151,7 +151,7 @@ static int start_stop_khugepaged(void)
                 if (!khugepaged_thread)
                         khugepaged_thread = kthread_run(khugepaged, NULL,
                                                         "khugepaged");
-                if (unlikely(IS_ERR(khugepaged_thread))) {
+                if (IS_ERR(khugepaged_thread)) {
                         pr_err("khugepaged: kthread_run(khugepaged) failed\n");
                         err = PTR_ERR(khugepaged_thread);
                         khugepaged_thread = NULL;
...
@@ -1175,7 +1175,7 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
                                                 info, OVS_FLOW_CMD_NEW, false,
                                                 ufid_flags);
 
-                if (unlikely(IS_ERR(reply))) {
+                if (IS_ERR(reply)) {
                         error = PTR_ERR(reply);
                         goto err_unlock_ovs;
                 }
...
@@ -4475,7 +4475,7 @@ static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval
         }
 
         newfile = sock_alloc_file(newsock, 0, NULL);
-        if (unlikely(IS_ERR(newfile))) {
+        if (IS_ERR(newfile)) {
                 put_unused_fd(retval);
                 sock_release(newsock);
                 return PTR_ERR(newfile);
...
@@ -373,7 +373,7 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
         file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
                   &socket_file_ops);
-        if (unlikely(IS_ERR(file))) {
+        if (IS_ERR(file)) {
                 /* drop dentry, keep inode */
                 ihold(d_inode(path.dentry));
                 path_put(&path);
@@ -1303,7 +1303,7 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
         }
 
         newfile1 = sock_alloc_file(sock1, flags, NULL);
-        if (unlikely(IS_ERR(newfile1))) {
+        if (IS_ERR(newfile1)) {
                 err = PTR_ERR(newfile1);
                 goto out_put_unused_both;
         }
@@ -1467,7 +1467,7 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
                 goto out_put;
         }
         newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
-        if (unlikely(IS_ERR(newfile))) {
+        if (IS_ERR(newfile)) {
                 err = PTR_ERR(newfile);
                 put_unused_fd(newfd);
                 sock_release(newsock);
...