Commit 5cb8418c authored by Linus Torvalds

Merge tag 'for-linus-2019-11-08' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Two NVMe device removal crash fixes, and a compat fixup for an
   ioctl that was introduced in this release (Anton, Charles, Max - via
   Keith)

 - Missing error path mutex unlock for drbd (Dan)

 - cgroup writeback fixup on dead memcg (Tejun)

 - blkcg online stats print fix (Tejun)

* tag 'for-linus-2019-11-08' of git://git.kernel.dk/linux-block:
  cgroup,writeback: don't switch wbs immediately on dead wbs if the memcg is dead
  block: drbd: remove a stray unlock in __drbd_send_protocol()
  blkcg: make blkcg_print_stat() print stats only for online blkgs
  nvme: change nvme_passthru_cmd64 to explicitly mark rsvd
  nvme-multipath: fix crash in nvme_mpath_clear_ctrl_paths
  nvme-rdma: fix a segmentation fault during module unload
parents abf6c397 65de03e2
@@ -934,9 +934,14 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 		int i;
 		bool has_stats = false;
 
+		spin_lock_irq(&blkg->q->queue_lock);
+
+		if (!blkg->online)
+			goto skip;
+
 		dname = blkg_dev_name(blkg);
 		if (!dname)
-			continue;
+			goto skip;
 
 		/*
 		 * Hooray string manipulation, count is the size written NOT
@@ -946,8 +951,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 		 */
 		off += scnprintf(buf+off, size-off, "%s ", dname);
 
-		spin_lock_irq(&blkg->q->queue_lock);
-
 		blkg_rwstat_recursive_sum(blkg, NULL,
 				offsetof(struct blkcg_gq, stat_bytes), &rwstat);
 		rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
@@ -960,8 +963,6 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 		wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
 		dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];
 
-		spin_unlock_irq(&blkg->q->queue_lock);
-
 		if (rbytes || wbytes || rios || wios) {
 			has_stats = true;
 			off += scnprintf(buf+off, size-off,
@@ -999,6 +1000,8 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 				seq_commit(sf, -1);
 			}
 		}
+	skip:
+		spin_unlock_irq(&blkg->q->queue_lock);
 	}
 
 	rcu_read_unlock();
@@ -786,7 +786,6 @@ int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cm
 
 	if (nc->tentative && connection->agreed_pro_version < 92) {
 		rcu_read_unlock();
-		mutex_unlock(&sock->mutex);
 		drbd_err(connection, "--dry-run is not supported by peer");
 		return -EOPNOTSUPP;
 	}
@@ -158,9 +158,11 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 	struct nvme_ns *ns;
 
+	mutex_lock(&ctrl->scan_lock);
 	down_read(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list)
 		if (nvme_mpath_clear_current_path(ns))
 			kblockd_schedule_work(&ns->head->requeue_work);
 	up_read(&ctrl->namespaces_rwsem);
+	mutex_unlock(&ctrl->scan_lock);
 }
@@ -2133,8 +2133,16 @@ static int __init nvme_rdma_init_module(void)
 
 static void __exit nvme_rdma_cleanup_module(void)
 {
+	struct nvme_rdma_ctrl *ctrl;
+
 	nvmf_unregister_transport(&nvme_rdma_transport);
 	ib_unregister_client(&nvme_rdma_ib_client);
+
+	mutex_lock(&nvme_rdma_ctrl_mutex);
+	list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
+		nvme_delete_ctrl(&ctrl->ctrl);
+	mutex_unlock(&nvme_rdma_ctrl_mutex);
+	flush_workqueue(nvme_delete_wq);
 }
 
 module_init(nvme_rdma_init_module);
@@ -576,10 +576,13 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
 	spin_unlock(&inode->i_lock);
 
 	/*
-	 * A dying wb indicates that the memcg-blkcg mapping has changed
-	 * and a new wb is already serving the memcg. Switch immediately.
+	 * A dying wb indicates that either the blkcg associated with the
+	 * memcg changed or the associated memcg is dying. In the first
+	 * case, a replacement wb should already be available and we should
+	 * refresh the wb immediately. In the second case, trying to
+	 * refresh will keep failing.
 	 */
-	if (unlikely(wb_dying(wbc->wb)))
+	if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
 		inode_switch_wbs(inode, wbc->wb_id);
 }
 EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
@@ -63,6 +63,7 @@ struct nvme_passthru_cmd64 {
 	__u32	cdw14;
 	__u32	cdw15;
 	__u32	timeout_ms;
+	__u32	rsvd2;
 	__u64	result;
 };
 