Commit 89ec9cfd authored by Mike Christie, committed by Nicholas Bellinger

tcmu: split unmap_thread_fn

Separate unmap_thread_fn to make it easier to read.

Note: this patch does not fix the bug where we might
miss a wake up call. The next patch will fix that.
This patch only separates the code into functions.
Signed-off-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent bf99ec13
...@@ -1973,71 +1973,91 @@ static struct target_backend_ops tcmu_ops = { ...@@ -1973,71 +1973,91 @@ static struct target_backend_ops tcmu_ops = {
.tb_dev_attrib_attrs = NULL, .tb_dev_attrib_attrs = NULL,
}; };
static int unmap_thread_fn(void *data)
static void find_free_blocks(void)
{ {
struct tcmu_dev *udev; struct tcmu_dev *udev;
loff_t off; loff_t off;
uint32_t start, end, block; uint32_t start, end, block;
while (!kthread_should_stop()) { mutex_lock(&root_udev_mutex);
DEFINE_WAIT(__wait); list_for_each_entry(udev, &root_udev, node) {
mutex_lock(&udev->cmdr_lock);
prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
schedule();
finish_wait(&unmap_wait, &__wait);
if (kthread_should_stop()) /* Try to complete the finished commands first */
break; tcmu_handle_completions(udev);
mutex_lock(&root_udev_mutex); /* Skip the udevs waiting the global pool or in idle */
list_for_each_entry(udev, &root_udev, node) { if (udev->waiting_global || !udev->dbi_thresh) {
mutex_lock(&udev->cmdr_lock); mutex_unlock(&udev->cmdr_lock);
continue;
}
/* Try to complete the finished commands first */ end = udev->dbi_max + 1;
tcmu_handle_completions(udev); block = find_last_bit(udev->data_bitmap, end);
if (block == udev->dbi_max) {
/*
* The last bit is dbi_max, so there is
* no need to shrink any blocks.
*/
mutex_unlock(&udev->cmdr_lock);
continue;
} else if (block == end) {
/* The current udev will goto idle state */
udev->dbi_thresh = start = 0;
udev->dbi_max = 0;
} else {
udev->dbi_thresh = start = block + 1;
udev->dbi_max = block;
}
/* Skip the udevs waiting the global pool or in idle */ /* Here will truncate the data area from off */
if (udev->waiting_global || !udev->dbi_thresh) { off = udev->data_off + start * DATA_BLOCK_SIZE;
mutex_unlock(&udev->cmdr_lock); unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
continue;
}
end = udev->dbi_max + 1; /* Release the block pages */
block = find_last_bit(udev->data_bitmap, end); tcmu_blocks_release(&udev->data_blocks, start, end);
if (block == udev->dbi_max) { mutex_unlock(&udev->cmdr_lock);
/* }
* The last bit is dbi_max, so there is mutex_unlock(&root_udev_mutex);
* no need to shrink any blocks. }
*/
mutex_unlock(&udev->cmdr_lock);
continue;
} else if (block == end) {
/* The current udev will goto idle state */
udev->dbi_thresh = start = 0;
udev->dbi_max = 0;
} else {
udev->dbi_thresh = start = block + 1;
udev->dbi_max = block;
}
/*
 * Wake up devices blocked waiting for space in the global data block
 * pool.  The walk stops at the first device that is not waiting
 * (waiting_global clear), rather than scanning the whole list.
 */
static void run_cmdr_queues(void)
{
	struct tcmu_dev *udev;

	mutex_lock(&root_udev_mutex);
	list_for_each_entry(udev, &root_udev, node) {
		mutex_lock(&udev->cmdr_lock);
		if (!udev->waiting_global) {
			/* First non-waiter ends the scan. */
			mutex_unlock(&udev->cmdr_lock);
			break;
		}
		mutex_unlock(&udev->cmdr_lock);

		wake_up(&udev->wait_cmdr);
	}
	mutex_unlock(&root_udev_mutex);
}
list_for_each_entry(udev, &root_udev, node) {
if (udev->waiting_global) static int unmap_thread_fn(void *data)
wake_up(&udev->wait_cmdr); {
} while (!kthread_should_stop()) {
mutex_unlock(&root_udev_mutex); DEFINE_WAIT(__wait);
prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
schedule();
finish_wait(&unmap_wait, &__wait);
if (kthread_should_stop())
break;
find_free_blocks();
run_cmdr_queues();
} }
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment