Commit d6e260cc authored by Mike Snitzer

dm vdo wait-queue: add proper namespace to interface

Rename various interfaces and structs associated with vdo's wait-queue,
e.g.: s/wait_queue/vdo_wait_queue/, s/waiter/vdo_waiter/, etc.

Now all function names start with "vdo_waitq_" or "vdo_waiter_".
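
For reference, representative renames applied throughout the diff below (each pair is taken directly from the hunks):

    struct wait_queue            ->  struct vdo_wait_queue
    struct waiter                ->  struct vdo_waiter
    vdo_initialize_wait_queue()  ->  vdo_waitq_init()
    vdo_enqueue_waiter()         ->  vdo_waitq_enqueue_waiter()
    vdo_dequeue_next_waiter()    ->  vdo_waitq_dequeue_next_waiter()
    vdo_has_waiters()            ->  vdo_waitq_has_waiters()
    vdo_count_waiters()          ->  vdo_waitq_num_waiters()
    vdo_get_first_waiter()       ->  vdo_waitq_get_first_waiter()
    vdo_notify_all_waiters()     ->  vdo_waitq_notify_all_waiters()
    vdo_notify_next_waiter()     ->  vdo_waitq_notify_next_waiter()
    vdo_transfer_all_waiters()   ->  vdo_waitq_transfer_all_waiters()
    waiter_as_data_vio()         ->  vdo_waiter_as_data_vio()
    waiter_callback_fn           ->  vdo_waiter_callback_fn
    waiter_match_fn              ->  vdo_waiter_match_fn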

Reviewed-by: Ken Raeburn <raeburn@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
parent 46a707cc
@@ -68,7 +68,7 @@ struct vdo_page_cache {
 	/* how many VPCs waiting for free page */
 	unsigned int waiter_count;
 	/* queue of waiters who want a free page */
-	struct wait_queue free_waiters;
+	struct vdo_wait_queue free_waiters;
 	/*
 	 * Statistics are only updated on the logical zone thread, but are accessed from other
 	 * threads.
@@ -129,7 +129,7 @@ struct page_info {
 	/* page state */
 	enum vdo_page_buffer_state state;
 	/* queue of completions awaiting this item */
-	struct wait_queue waiting;
+	struct vdo_wait_queue waiting;
 	/* state linked list entry */
 	struct list_head state_entry;
 	/* LRU entry */
@@ -153,7 +153,7 @@ struct vdo_page_completion {
 	/* The cache involved */
 	struct vdo_page_cache *cache;
 	/* The waiter for the pending list */
-	struct waiter waiter;
+	struct vdo_waiter waiter;
 	/* The absolute physical block number of the page on disk */
 	physical_block_number_t pbn;
 	/* Whether the page may be modified */
@@ -167,7 +167,7 @@ struct vdo_page_completion {
 struct forest;
 
 struct tree_page {
-	struct waiter waiter;
+	struct vdo_waiter waiter;
 
 	/* Dirty list entry */
 	struct list_head entry;
@@ -228,7 +228,7 @@ struct block_map_zone {
 	struct vio_pool *vio_pool;
 	/* The tree page which has issued or will be issuing a flush */
 	struct tree_page *flusher;
-	struct wait_queue flush_waiters;
+	struct vdo_wait_queue flush_waiters;
 	/* The generation after the most recent flush */
 	u8 generation;
 	u8 oldest_generation;
...
@@ -249,7 +249,7 @@ static void initialize_lbn_lock(struct data_vio *data_vio, logical_block_number_
 	lock->lbn = lbn;
 	lock->locked = false;
-	vdo_initialize_wait_queue(&lock->waiters);
+	vdo_waitq_init(&lock->waiters);
 	zone_number = vdo_compute_logical_zone(data_vio);
 	lock->zone = &vdo->logical_zones->zones[zone_number];
 }
@@ -466,7 +466,7 @@ static void attempt_logical_block_lock(struct vdo_completion *completion)
 	}
 
 	data_vio->last_async_operation = VIO_ASYNC_OP_ATTEMPT_LOGICAL_BLOCK_LOCK;
-	vdo_enqueue_waiter(&lock_holder->logical.waiters, &data_vio->waiter);
+	vdo_waitq_enqueue_waiter(&lock_holder->logical.waiters, &data_vio->waiter);
 
 	/*
 	 * Prevent writes and read-modify-writes from blocking indefinitely on lock holders in the
@@ -1191,10 +1191,10 @@ static void transfer_lock(struct data_vio *data_vio, struct lbn_lock *lock)
 	/* Another data_vio is waiting for the lock, transfer it in a single lock map operation. */
 	next_lock_holder =
-		waiter_as_data_vio(vdo_dequeue_next_waiter(&lock->waiters));
+		vdo_waiter_as_data_vio(vdo_waitq_dequeue_next_waiter(&lock->waiters));
 
 	/* Transfer the remaining lock waiters to the next lock holder. */
-	vdo_transfer_all_waiters(&lock->waiters,
-				 &next_lock_holder->logical.waiters);
+	vdo_waitq_transfer_all_waiters(&lock->waiters,
+				       &next_lock_holder->logical.waiters);
 
 	result = vdo_int_map_put(lock->zone->lbn_operations, lock->lbn,
@@ -1213,7 +1213,7 @@ static void transfer_lock(struct data_vio *data_vio, struct lbn_lock *lock)
 	 * If there are still waiters, other data_vios must be trying to get the lock we just
 	 * transferred. We must ensure that the new lock holder doesn't block in the packer.
 	 */
-	if (vdo_has_waiters(&next_lock_holder->logical.waiters))
+	if (vdo_waitq_has_waiters(&next_lock_holder->logical.waiters))
 		cancel_data_vio_compression(next_lock_holder);
 
 	/*
@@ -1235,7 +1235,7 @@ static void release_logical_lock(struct vdo_completion *completion)
 	assert_data_vio_in_logical_zone(data_vio);
 
-	if (vdo_has_waiters(&lock->waiters))
+	if (vdo_waitq_has_waiters(&lock->waiters))
 		transfer_lock(data_vio, lock);
 	else
 		release_lock(data_vio, lock);
...
@@ -54,7 +54,7 @@ enum async_operation_number {
 struct lbn_lock {
 	logical_block_number_t lbn;
 	bool locked;
-	struct wait_queue waiters;
+	struct vdo_wait_queue waiters;
 	struct logical_zone *zone;
 };
@@ -75,7 +75,7 @@ struct tree_lock {
 	/* The key for the lock map */
 	u64 key;
 	/* The queue of waiters for the page this vio is allocating or loading */
-	struct wait_queue waiters;
+	struct vdo_wait_queue waiters;
 	/* The block map tree slots for this LBN */
 	struct block_map_tree_slot tree_slots[VDO_BLOCK_MAP_TREE_HEIGHT + 1];
 };
@@ -168,13 +168,13 @@ struct reference_updater {
 	bool increment;
 	struct zoned_pbn zpbn;
 	struct pbn_lock *lock;
-	struct waiter waiter;
+	struct vdo_waiter waiter;
 };
 
 /* A vio for processing user data requests. */
 struct data_vio {
-	/* The wait_queue entry structure */
-	struct waiter waiter;
+	/* The vdo_wait_queue entry structure */
+	struct vdo_waiter waiter;
 
 	/* The logical block of this request */
 	struct lbn_lock logical;
@@ -288,7 +288,7 @@ static inline struct data_vio *as_data_vio(struct vdo_completion *completion)
 	return vio_as_data_vio(as_vio(completion));
 }
 
-static inline struct data_vio *waiter_as_data_vio(struct waiter *waiter)
+static inline struct data_vio *vdo_waiter_as_data_vio(struct vdo_waiter *waiter)
 {
 	if (waiter == NULL)
 		return NULL;
...
@@ -270,7 +270,7 @@ struct hash_lock {
 	 * to get the information they all need to deduplicate--either against each other, or
 	 * against an existing duplicate on disk.
 	 */
-	struct wait_queue waiters;
+	struct vdo_wait_queue waiters;
 };
 
 enum {
@@ -351,7 +351,7 @@ static void return_hash_lock_to_pool(struct hash_zone *zone, struct hash_lock *l
 	memset(lock, 0, sizeof(*lock));
 	INIT_LIST_HEAD(&lock->pool_node);
 	INIT_LIST_HEAD(&lock->duplicate_ring);
-	vdo_initialize_wait_queue(&lock->waiters);
+	vdo_waitq_init(&lock->waiters);
 	list_add_tail(&lock->pool_node, &zone->lock_pool);
 }
@@ -420,7 +420,7 @@ static void set_duplicate_lock(struct hash_lock *hash_lock, struct pbn_lock *pbn
 */
 static inline struct data_vio *dequeue_lock_waiter(struct hash_lock *lock)
 {
-	return waiter_as_data_vio(vdo_dequeue_next_waiter(&lock->waiters));
+	return vdo_waiter_as_data_vio(vdo_waitq_dequeue_next_waiter(&lock->waiters));
 }
 
 /**
@@ -536,7 +536,7 @@ static struct data_vio *retire_lock_agent(struct hash_lock *lock)
 */
 static void wait_on_hash_lock(struct hash_lock *lock, struct data_vio *data_vio)
 {
-	vdo_enqueue_waiter(&lock->waiters, &data_vio->waiter);
+	vdo_waitq_enqueue_waiter(&lock->waiters, &data_vio->waiter);
 
 	/*
 	 * Make sure the agent doesn't block indefinitely in the packer since it now has at least
@@ -562,9 +562,9 @@ static void wait_on_hash_lock(struct hash_lock *lock, struct data_vio *data_vio)
 * @waiter: The data_vio's waiter link.
 * @context: Not used.
 */
-static void abort_waiter(struct waiter *waiter, void *context __always_unused)
+static void abort_waiter(struct vdo_waiter *waiter, void *context __always_unused)
 {
-	write_data_vio(waiter_as_data_vio(waiter));
+	write_data_vio(vdo_waiter_as_data_vio(waiter));
 }
 
 /**
@@ -602,7 +602,7 @@ void vdo_clean_failed_hash_lock(struct data_vio *data_vio)
 	/* Ensure we don't attempt to update advice when cleaning up. */
 	lock->update_advice = false;
 
-	vdo_notify_all_waiters(&lock->waiters, abort_waiter, NULL);
+	vdo_waitq_notify_all_waiters(&lock->waiters, abort_waiter, NULL);
 
 	if (lock->duplicate_lock != NULL) {
 		/* The agent must reference the duplicate zone to launch it. */
@@ -650,7 +650,7 @@ static void finish_unlocking(struct vdo_completion *completion)
 	 */
 	lock->verified = false;
 
-	if (vdo_has_waiters(&lock->waiters)) {
+	if (vdo_waitq_has_waiters(&lock->waiters)) {
 		/*
 		 * UNLOCKING -> LOCKING transition: A new data_vio entered the hash lock while the
 		 * agent was releasing the PBN lock. The current agent exits and the waiter has to
@@ -750,7 +750,7 @@ static void finish_updating(struct vdo_completion *completion)
 	 */
 	lock->update_advice = false;
 
-	if (vdo_has_waiters(&lock->waiters)) {
+	if (vdo_waitq_has_waiters(&lock->waiters)) {
 		/*
 		 * UPDATING -> DEDUPING transition: A new data_vio arrived during the UDS update.
 		 * Send it on the verified dedupe path. The agent is done with the lock, but the
@@ -812,7 +812,7 @@ static void finish_deduping(struct hash_lock *lock, struct data_vio *data_vio)
 	struct data_vio *agent = data_vio;
 
 	ASSERT_LOG_ONLY(lock->agent == NULL, "shouldn't have an agent in DEDUPING");
-	ASSERT_LOG_ONLY(!vdo_has_waiters(&lock->waiters),
+	ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters),
			"shouldn't have any lock waiters in DEDUPING");
 
 	/* Just release the lock reference if other data_vios are still deduping. */
@@ -917,9 +917,9 @@ static int __must_check acquire_lock(struct hash_zone *zone,
 * Implements waiter_callback_fn. Binds the data_vio that was waiting to a new hash lock and waits
 * on that lock.
 */
-static void enter_forked_lock(struct waiter *waiter, void *context)
+static void enter_forked_lock(struct vdo_waiter *waiter, void *context)
 {
-	struct data_vio *data_vio = waiter_as_data_vio(waiter);
+	struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
 	struct hash_lock *new_lock = context;
 
 	set_hash_lock(data_vio, new_lock);
@@ -956,7 +956,7 @@ static void fork_hash_lock(struct hash_lock *old_lock, struct data_vio *new_agen
 	set_hash_lock(new_agent, new_lock);
 	new_lock->agent = new_agent;
-	vdo_notify_all_waiters(&old_lock->waiters, enter_forked_lock, new_lock);
+	vdo_waitq_notify_all_waiters(&old_lock->waiters, enter_forked_lock, new_lock);
 
 	new_agent->is_duplicate = false;
 	start_writing(new_lock, new_agent);
@@ -1033,7 +1033,7 @@ static void start_deduping(struct hash_lock *lock, struct data_vio *agent,
 		launch_dedupe(lock, agent, true);
 		agent = NULL;
 	}
-	while (vdo_has_waiters(&lock->waiters))
+	while (vdo_waitq_has_waiters(&lock->waiters))
 		launch_dedupe(lock, dequeue_lock_waiter(lock), false);
 
 	if (agent_is_done) {
@@ -1454,7 +1454,7 @@ static void finish_writing(struct hash_lock *lock, struct data_vio *agent)
 		lock->update_advice = true;
 
 	/* If there are any waiters, we need to start deduping them. */
-	if (vdo_has_waiters(&lock->waiters)) {
+	if (vdo_waitq_has_waiters(&lock->waiters)) {
 		/*
 		 * WRITING -> DEDUPING transition: an asynchronously-written block failed to
 		 * compress, so the PBN lock on the written copy was already transferred. The agent
@@ -1502,10 +1502,10 @@ static void finish_writing(struct hash_lock *lock, struct data_vio *agent)
 */
 static struct data_vio *select_writing_agent(struct hash_lock *lock)
 {
-	struct wait_queue temp_queue;
+	struct vdo_wait_queue temp_queue;
 	struct data_vio *data_vio;
 
-	vdo_initialize_wait_queue(&temp_queue);
+	vdo_waitq_init(&temp_queue);
 
 	/*
 	 * Move waiters to the temp queue one-by-one until we find an allocation. Not ideal to
@@ -1514,7 +1514,7 @@ static struct data_vio *select_writing_agent(struct hash_lock *lock)
 	while (((data_vio = dequeue_lock_waiter(lock)) != NULL) &&
 	       !data_vio_has_allocation(data_vio)) {
 		/* Use the lower-level enqueue since we're just moving waiters around. */
-		vdo_enqueue_waiter(&temp_queue, &data_vio->waiter);
+		vdo_waitq_enqueue_waiter(&temp_queue, &data_vio->waiter);
 	}
 
 	if (data_vio != NULL) {
@@ -1522,13 +1522,13 @@ static struct data_vio *select_writing_agent(struct hash_lock *lock)
 		 * Move the rest of the waiters over to the temp queue, preserving the order they
 		 * arrived at the lock.
 		 */
-		vdo_transfer_all_waiters(&lock->waiters, &temp_queue);
+		vdo_waitq_transfer_all_waiters(&lock->waiters, &temp_queue);
 
 		/*
 		 * The current agent is being replaced and will have to wait to dedupe; make it the
 		 * first waiter since it was the first to reach the lock.
 		 */
-		vdo_enqueue_waiter(&lock->waiters, &lock->agent->waiter);
+		vdo_waitq_enqueue_waiter(&lock->waiters, &lock->agent->waiter);
 		lock->agent = data_vio;
 	} else {
 		/* No one has an allocation, so keep the current agent. */
@@ -1536,7 +1536,7 @@ static struct data_vio *select_writing_agent(struct hash_lock *lock)
 	}
 
 	/* Swap all the waiters back onto the lock's queue. */
-	vdo_transfer_all_waiters(&temp_queue, &lock->waiters);
+	vdo_waitq_transfer_all_waiters(&temp_queue, &lock->waiters);
 	return data_vio;
 }
@@ -1577,7 +1577,7 @@ static void start_writing(struct hash_lock *lock, struct data_vio *agent)
 	 * If the agent compresses, it might wait indefinitely in the packer, which would be bad if
 	 * there are any other data_vios waiting.
 	 */
-	if (vdo_has_waiters(&lock->waiters))
+	if (vdo_waitq_has_waiters(&lock->waiters))
 		cancel_data_vio_compression(agent);
 
 	/*
@@ -1928,7 +1928,7 @@ void vdo_release_hash_lock(struct data_vio *data_vio)
 			"unregistered hash lock must not be in the lock map");
 	}
 
-	ASSERT_LOG_ONLY(!vdo_has_waiters(&lock->waiters),
+	ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters),
			"hash lock returned to zone must have no waiters");
 	ASSERT_LOG_ONLY((lock->duplicate_lock == NULL),
			"hash lock returned to zone must not reference a PBN lock");
@@ -2812,7 +2812,7 @@ static void dump_hash_lock(const struct hash_lock *lock)
 		     lock, state, (lock->registered ? 'D' : 'U'),
 		     (unsigned long long) lock->duplicate.pbn,
 		     lock->duplicate.state, lock->reference_count,
-		     vdo_count_waiters(&lock->waiters), lock->agent);
+		     vdo_waitq_num_waiters(&lock->waiters), lock->agent);
 }
 
 static const char *index_state_to_string(struct hash_zones *zones,
...
@@ -146,25 +146,25 @@ void vdo_dump_all(struct vdo *vdo, const char *why)
 }
 
 /*
- * Dump out the data_vio waiters on a wait queue.
+ * Dump out the data_vio waiters on a waitq.
 * wait_on should be the label to print for queue (e.g. logical or physical)
 */
-static void dump_vio_waiters(struct wait_queue *queue, char *wait_on)
+static void dump_vio_waiters(struct vdo_wait_queue *waitq, char *wait_on)
 {
-	struct waiter *waiter, *first = vdo_get_first_waiter(queue);
+	struct vdo_waiter *waiter, *first = vdo_waitq_get_first_waiter(waitq);
 	struct data_vio *data_vio;
 
 	if (first == NULL)
 		return;
 
-	data_vio = waiter_as_data_vio(first);
+	data_vio = vdo_waiter_as_data_vio(first);
 
 	uds_log_info("  %s is locked. Waited on by: vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s",
 		     wait_on, data_vio, data_vio->allocation.pbn, data_vio->logical.lbn,
 		     data_vio->duplicate.pbn, get_data_vio_operation_name(data_vio));
 
 	for (waiter = first->next_waiter; waiter != first; waiter = waiter->next_waiter) {
-		data_vio = waiter_as_data_vio(waiter);
+		data_vio = vdo_waiter_as_data_vio(waiter);
 		uds_log_info("  ... and : vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s",
 			     data_vio, data_vio->allocation.pbn, data_vio->logical.lbn,
 			     data_vio->duplicate.pbn,
@@ -177,7 +177,7 @@ static void dump_vio_waiters(struct wait_queue *queue, char *wait_on)
 * logging brevity:
 *
 *  R => vio completion result not VDO_SUCCESS
- *  W => vio is on a wait queue
+ *  W => vio is on a waitq
 *  D => vio is a duplicate
 *  p => vio is a partial block operation
 *  z => vio is a zero block
...
@@ -31,9 +31,9 @@ struct flusher {
 	/** The first unacknowledged flush generation */
 	sequence_number_t first_unacknowledged_generation;
 	/** The queue of flush requests waiting to notify other threads */
-	struct wait_queue notifiers;
+	struct vdo_wait_queue notifiers;
 	/** The queue of flush requests waiting for VIOs to complete */
-	struct wait_queue pending_flushes;
+	struct vdo_wait_queue pending_flushes;
 	/** The flush generation for which notifications are being sent */
 	sequence_number_t notify_generation;
 	/** The logical zone to notify next */
@@ -93,7 +93,7 @@ static inline struct vdo_flush *completion_as_vdo_flush(struct vdo_completion *c
 *
 * Return: The wait queue entry as a vdo_flush.
 */
-static struct vdo_flush *waiter_as_flush(struct waiter *waiter)
+static struct vdo_flush *vdo_waiter_as_flush(struct vdo_waiter *waiter)
 {
 	return container_of(waiter, struct vdo_flush, waiter);
 }
@@ -195,10 +195,10 @@ static void finish_notification(struct vdo_completion *completion)
 	assert_on_flusher_thread(flusher, __func__);
 
-	vdo_enqueue_waiter(&flusher->pending_flushes,
-			   vdo_dequeue_next_waiter(&flusher->notifiers));
+	vdo_waitq_enqueue_waiter(&flusher->pending_flushes,
+				 vdo_waitq_dequeue_next_waiter(&flusher->notifiers));
 	vdo_complete_flushes(flusher);
-	if (vdo_has_waiters(&flusher->notifiers))
+	if (vdo_waitq_has_waiters(&flusher->notifiers))
 		notify_flush(flusher);
 }
@@ -248,7 +248,8 @@ static void increment_generation(struct vdo_completion *completion)
 */
 static void notify_flush(struct flusher *flusher)
 {
-	struct vdo_flush *flush = waiter_as_flush(vdo_get_first_waiter(&flusher->notifiers));
+	struct vdo_flush *flush =
+		vdo_waiter_as_flush(vdo_waitq_get_first_waiter(&flusher->notifiers));
 
 	flusher->notify_generation = flush->flush_generation;
 	flusher->logical_zone_to_notify = &flusher->vdo->logical_zones->zones[0];
@@ -280,8 +281,8 @@ static void flush_vdo(struct vdo_completion *completion)
 	}
 
 	flush->flush_generation = flusher->flush_generation++;
-	may_notify = !vdo_has_waiters(&flusher->notifiers);
-	vdo_enqueue_waiter(&flusher->notifiers, &flush->waiter);
+	may_notify = !vdo_waitq_has_waiters(&flusher->notifiers);
+	vdo_waitq_enqueue_waiter(&flusher->notifiers, &flush->waiter);
 	if (may_notify)
 		notify_flush(flusher);
 }
@@ -294,7 +295,8 @@ static void check_for_drain_complete(struct flusher *flusher)
 {
 	bool drained;
 
-	if (!vdo_is_state_draining(&flusher->state) || vdo_has_waiters(&flusher->pending_flushes))
+	if (!vdo_is_state_draining(&flusher->state) ||
+	    vdo_waitq_has_waiters(&flusher->pending_flushes))
 		return;
 
 	spin_lock(&flusher->lock);
@@ -321,9 +323,9 @@ void vdo_complete_flushes(struct flusher *flusher)
 			min(oldest_active_generation,
 			    READ_ONCE(zone->oldest_active_generation));
 
-	while (vdo_has_waiters(&flusher->pending_flushes)) {
+	while (vdo_waitq_has_waiters(&flusher->pending_flushes)) {
 		struct vdo_flush *flush =
-			waiter_as_flush(vdo_get_first_waiter(&flusher->pending_flushes));
+			vdo_waiter_as_flush(vdo_waitq_get_first_waiter(&flusher->pending_flushes));
 
 		if (flush->flush_generation >= oldest_active_generation)
 			return;
@@ -333,7 +335,7 @@ void vdo_complete_flushes(struct flusher *flusher)
 			"acknowledged next expected flush, %llu, was: %llu",
 			(unsigned long long) flusher->first_unacknowledged_generation,
 			(unsigned long long) flush->flush_generation);
-		vdo_dequeue_next_waiter(&flusher->pending_flushes);
+		vdo_waitq_dequeue_next_waiter(&flusher->pending_flushes);
 		vdo_complete_flush(flush);
 		flusher->first_unacknowledged_generation++;
 	}
@@ -352,8 +354,8 @@ void vdo_dump_flusher(const struct flusher *flusher)
 		     (unsigned long long) flusher->flush_generation,
 		     (unsigned long long) flusher->first_unacknowledged_generation);
 	uds_log_info("  notifiers queue is %s; pending_flushes queue is %s",
-		     (vdo_has_waiters(&flusher->notifiers) ? "not empty" : "empty"),
-		     (vdo_has_waiters(&flusher->pending_flushes) ? "not empty" : "empty"));
+		     (vdo_waitq_has_waiters(&flusher->notifiers) ? "not empty" : "empty"),
+		     (vdo_waitq_has_waiters(&flusher->pending_flushes) ? "not empty" : "empty"));
 }
 
 /**
...
@@ -18,7 +18,7 @@ struct vdo_flush {
 	/* The flush bios covered by this request */
 	struct bio_list bios;
 	/* The wait queue entry for this flush */
-	struct waiter waiter;
+	struct vdo_waiter waiter;
 	/* Which flush this struct represents */
 	sequence_number_t flush_generation;
 };
...
@@ -519,9 +519,9 @@ static int allocate_and_lock_block(struct allocation *allocation)
 * @waiter: The allocating_vio that was waiting to allocate.
 * @context: The context (unused).
 */
-static void retry_allocation(struct waiter *waiter, void *context __always_unused)
+static void retry_allocation(struct vdo_waiter *waiter, void *context __always_unused)
 {
-	struct data_vio *data_vio = waiter_as_data_vio(waiter);
+	struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
 
 	/* Now that some slab has scrubbed, restart the allocation process. */
 	data_vio->allocation.wait_for_clean_slab = false;
...
@@ -267,9 +267,9 @@ static void assert_on_journal_thread(struct recovery_journal *journal,
 * Invoked whenever a data_vio is to be released from the journal, either because its entry was
 * committed to disk, or because there was an error. Implements waiter_callback_fn.
 */
-static void continue_waiter(struct waiter *waiter, void *context)
+static void continue_waiter(struct vdo_waiter *waiter, void *context)
 {
-	continue_data_vio_with_error(waiter_as_data_vio(waiter), *((int *) context));
+	continue_data_vio_with_error(vdo_waiter_as_data_vio(waiter), *((int *) context));
 }
 
 /**
@@ -287,8 +287,8 @@ static inline bool has_block_waiters(struct recovery_journal *journal)
 	 * has waiters.
 	 */
 	return ((block != NULL) &&
-		(vdo_has_waiters(&block->entry_waiters) ||
-		 vdo_has_waiters(&block->commit_waiters)));
+		(vdo_waitq_has_waiters(&block->entry_waiters) ||
+		 vdo_waitq_has_waiters(&block->commit_waiters)));
 }
 
 static void recycle_journal_blocks(struct recovery_journal *journal);
@@ -343,14 +343,14 @@ static void check_for_drain_complete(struct recovery_journal *journal)
 		recycle_journal_blocks(journal);
 
 		/* Release any data_vios waiting to be assigned entries. */
-		vdo_notify_all_waiters(&journal->entry_waiters, continue_waiter,
-				       &result);
+		vdo_waitq_notify_all_waiters(&journal->entry_waiters,
+					     continue_waiter, &result);
 	}
 
 	if (!vdo_is_state_draining(&journal->state) ||
 	    journal->reaping ||
 	    has_block_waiters(journal) ||
-	    vdo_has_waiters(&journal->entry_waiters) ||
+	    vdo_waitq_has_waiters(&journal->entry_waiters) ||
 	    !suspend_lock_counter(&journal->lock_counter))
 		return;
@@ -721,7 +721,7 @@ int vdo_decode_recovery_journal(struct recovery_journal_state_7_0 state, nonce_t
 	INIT_LIST_HEAD(&journal->free_tail_blocks);
 	INIT_LIST_HEAD(&journal->active_tail_blocks);
-	vdo_initialize_wait_queue(&journal->pending_writes);
+	vdo_waitq_init(&journal->pending_writes);
 
 	journal->thread_id = vdo->thread_config.journal_thread;
 	journal->origin = partition->offset;
@@ -1047,7 +1047,7 @@ static void schedule_block_write(struct recovery_journal *journal,
 				 struct recovery_journal_block *block)
 {
 	if (!block->committing)
-		vdo_enqueue_waiter(&journal->pending_writes, &block->write_waiter);
+		vdo_waitq_enqueue_waiter(&journal->pending_writes, &block->write_waiter);
 	/*
 	 * At the end of adding entries, or discovering this partial block is now full and ready to
 	 * rewrite, we will call write_blocks() and write a whole batch.
@@ -1084,9 +1084,9 @@ static void update_usages(struct recovery_journal *journal, struct data_vio *dat
 *
 * Implements waiter_callback_fn.
 */
-static void assign_entry(struct waiter *waiter, void *context)
+static void assign_entry(struct vdo_waiter *waiter, void *context)
 {
-	struct data_vio *data_vio = waiter_as_data_vio(waiter);
+	struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
 	struct recovery_journal_block *block = context;
 	struct recovery_journal *journal = block->journal;
@@ -1099,10 +1099,10 @@ static void assign_entry(struct waiter *waiter, void *context)
 	update_usages(journal, data_vio);
 	journal->available_space--;
 
-	if (!vdo_has_waiters(&block->entry_waiters))
+	if (!vdo_waitq_has_waiters(&block->entry_waiters))
 		journal->events.blocks.started++;
 
-	vdo_enqueue_waiter(&block->entry_waiters, &data_vio->waiter);
+	vdo_waitq_enqueue_waiter(&block->entry_waiters, &data_vio->waiter);
 	block->entry_count++;
 	block->uncommitted_entry_count++;
 	journal->events.entries.started++;
@@ -1127,9 +1127,10 @@ static void assign_entries(struct recovery_journal *journal)
 	}
 
 	journal->adding_entries = true;
-	while (vdo_has_waiters(&journal->entry_waiters) && prepare_to_assign_entry(journal)) {
-		vdo_notify_next_waiter(&journal->entry_waiters, assign_entry,
-				       journal->active_block);
+	while (vdo_waitq_has_waiters(&journal->entry_waiters) &&
+	       prepare_to_assign_entry(journal)) {
+		vdo_waitq_notify_next_waiter(&journal->entry_waiters,
+					     assign_entry, journal->active_block);
 	}
 
 	/* Now that we've finished with entries, see if we have a batch of blocks to write. */
@@ -1170,9 +1171,9 @@ static void recycle_journal_block(struct recovery_journal_block *block)
 *
 * Implements waiter_callback_fn.
 */
-static void continue_committed_waiter(struct waiter *waiter, void *context)
+static void continue_committed_waiter(struct vdo_waiter *waiter, void *context)
 {
-	struct data_vio *data_vio = waiter_as_data_vio(waiter);
+	struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
 	struct recovery_journal *journal = context;
 	int result = (is_read_only(journal) ? VDO_READ_ONLY : VDO_SUCCESS);
 	bool has_decrement;
@@ -1216,11 +1217,12 @@ static void notify_commit_waiters(struct recovery_journal *journal)
 		if (block->committing)
 			return;
 
-		vdo_notify_all_waiters(&block->commit_waiters, continue_committed_waiter,
-				       journal);
+		vdo_waitq_notify_all_waiters(&block->commit_waiters,
+					     continue_committed_waiter, journal);
 		if (is_read_only(journal)) {
-			vdo_notify_all_waiters(&block->entry_waiters,
-					       continue_committed_waiter, journal);
+			vdo_waitq_notify_all_waiters(&block->entry_waiters,
+						     continue_committed_waiter,
+						     journal);
 		} else if (is_block_dirty(block) || !is_block_full(block)) {
 			/* Stop at partially-committed or partially-filled blocks. */
 			return;
@@ -1328,9 +1330,9 @@ static void complete_write_endio(struct bio *bio)
 */
 static void add_queued_recovery_entries(struct recovery_journal_block *block)
 {
-	while (vdo_has_waiters(&block->entry_waiters)) {
+	while (vdo_waitq_has_waiters(&block->entry_waiters)) {
 		struct data_vio *data_vio =
-			waiter_as_data_vio(vdo_dequeue_next_waiter(&block->entry_waiters));
+			vdo_waiter_as_data_vio(vdo_waitq_dequeue_next_waiter(&block->entry_waiters));
 		struct tree_lock *lock = &data_vio->tree_lock;
 		struct packed_recovery_journal_entry *packed_entry;
 		struct recovery_journal_entry new_entry;
@@ -1357,7 +1359,7 @@ static void add_queued_recovery_entries(struct recovery_journal_block *block)
 			data_vio->recovery_sequence_number = block->sequence_number;
 
 		/* Enqueue the data_vio to wait for its entry to commit. */
-		vdo_enqueue_waiter(&block->commit_waiters, &data_vio->waiter);
+		vdo_waitq_enqueue_waiter(&block->commit_waiters, &data_vio->waiter);
 	}
 }
@@ -1366,17 +1368,18 @@ static void add_queued_recovery_entries(struct recovery_journal_block *block)
 *
 * Implements waiter_callback_fn.
 */
-static void write_block(struct waiter *waiter, void *context __always_unused)
+static void write_block(struct vdo_waiter *waiter, void *context __always_unused)
 {
 	struct recovery_journal_block *block =
 		container_of(waiter, struct recovery_journal_block, write_waiter);
 	struct recovery_journal *journal = block->journal;
 	struct packed_journal_header *header = get_block_header(block);
 
-	if (block->committing || !vdo_has_waiters(&block->entry_waiters) || is_read_only(journal))
+	if (block->committing || !vdo_waitq_has_waiters(&block->entry_waiters) ||
+	    is_read_only(journal))
 		return;
 
-	block->entries_in_commit = vdo_count_waiters(&block->entry_waiters);
+	block->entries_in_commit = vdo_waitq_num_waiters(&block->entry_waiters);
 	add_queued_recovery_entries(block);
 
 	journal->pending_write_count += 1;
@@ -1419,7 +1422,7 @@ static void write_blocks(struct recovery_journal *journal)
 		return;
 
 	/* Write all the full blocks. */
-	vdo_notify_all_waiters(&journal->pending_writes, write_block, NULL);
+	vdo_waitq_notify_all_waiters(&journal->pending_writes, write_block, NULL);
 
 	/*
 	 * Do we need to write the active block? Only if we have no outstanding writes, even after
@@ -1459,7 +1462,7 @@ void vdo_add_recovery_journal_entry(struct recovery_journal *journal,
 			"journal lock not held for new entry");
 
 	vdo_advance_journal_point(&journal->append_point, journal->entries_per_block);
-	vdo_enqueue_waiter(&journal->entry_waiters, &data_vio->waiter);
+	vdo_waitq_enqueue_waiter(&journal->entry_waiters, &data_vio->waiter);
 	assign_entries(journal);
 }
@@ -1721,8 +1724,8 @@ static void dump_recovery_block(const struct recovery_journal_block *block)
 	uds_log_info("  sequence number %llu; entries %u; %s; %zu entry waiters; %zu commit waiters",
 		     (unsigned long long) block->sequence_number, block->entry_count,
 		     (block->committing ? "committing" : "waiting"),
-		     vdo_count_waiters(&block->entry_waiters),
-		     vdo_count_waiters(&block->commit_waiters));
+		     vdo_waitq_num_waiters(&block->entry_waiters),
+		     vdo_waitq_num_waiters(&block->commit_waiters));
 }
 
 /**
@@ -1745,7 +1748,7 @@ void vdo_dump_recovery_journal_statistics(const struct recovery_journal *journal
 		     (unsigned long long) journal->slab_journal_reap_head,
 		     (unsigned long long) stats.disk_full,
 		     (unsigned long long) stats.slab_journal_commits_requested,
-		     vdo_count_waiters(&journal->entry_waiters));
+		     vdo_waitq_num_waiters(&journal->entry_waiters));
 	uds_log_info("  entries: started=%llu written=%llu committed=%llu",
 		     (unsigned long long) stats.entries.started,
 		     (unsigned long long) stats.entries.written,
...
@@ -113,7 +113,7 @@ struct recovery_journal_block {
 	/* The doubly linked pointers for the free or active lists */
 	struct list_head list_node;
 	/* The waiter for the pending full block list */
-	struct waiter write_waiter;
+	struct vdo_waiter write_waiter;
 	/* The journal to which this block belongs */
 	struct recovery_journal *journal;
 	/* A pointer to the current sector in the packed block buffer */
@@ -133,9 +133,9 @@ struct recovery_journal_block {
 	/* The number of new entries in the current commit */
 	journal_entry_count_t entries_in_commit;
 	/* The queue of vios which will make entries for the next commit */
-	struct wait_queue entry_waiters;
+	struct vdo_wait_queue entry_waiters;
 	/* The queue of vios waiting for the current commit */
-	struct wait_queue commit_waiters;
+	struct vdo_wait_queue commit_waiters;
 };
 
 struct recovery_journal {
@@ -146,7 +146,7 @@ struct recovery_journal {
 	/* The block map which can hold locks on this journal */
 	struct block_map *block_map;
 	/* The queue of vios waiting to make entries */
-	struct wait_queue entry_waiters;
+	struct vdo_wait_queue entry_waiters;
 	/* The number of free entries in the journal */
 	u64 available_space;
 	/* The number of decrement entries which need to be made */
@@ -184,7 +184,7 @@ struct recovery_journal {
 	/* A pointer to the active block (the one we are adding entries to now) */
 	struct recovery_journal_block *active_block;
 	/* Journal blocks that need writing */
-	struct wait_queue pending_writes;
+	struct vdo_wait_queue pending_writes;
 	/* The new block map reap head after reaping */
 	sequence_number_t block_map_reap_head;
 	/* The head block number for the block map rebuild range */
...
@@ -60,13 +60,13 @@ struct journal_lock {
 
 struct slab_journal {
 	/* A waiter object for getting a VIO pool entry */
-	struct waiter resource_waiter;
+	struct vdo_waiter resource_waiter;
 	/* A waiter object for updating the slab summary */
-	struct waiter slab_summary_waiter;
+	struct vdo_waiter slab_summary_waiter;
 	/* A waiter object for getting a vio with which to flush */
-	struct waiter flush_waiter;
+	struct vdo_waiter flush_waiter;
 	/* The queue of VIOs waiting to make an entry */
-	struct wait_queue entry_waiters;
+	struct vdo_wait_queue entry_waiters;
 	/* The parent slab reference of this journal */
 	struct vdo_slab *slab;
@@ -149,7 +149,7 @@ struct slab_journal {
 */
 struct reference_block {
 	/* This block waits on the ref_counts to tell it to write */
-	struct waiter waiter;
+	struct vdo_waiter waiter;
 	/* The slab to which this reference_block belongs */
 	struct vdo_slab *slab;
 	/* The number of references in this block that represent allocations */
@@ -241,12 +241,12 @@ struct vdo_slab {
 	struct search_cursor search_cursor;
 
 	/* A list of the dirty blocks waiting to be written out */
-	struct wait_queue dirty_blocks;
+	struct vdo_wait_queue dirty_blocks;
 	/* The number of blocks which are currently writing */
 	size_t active_count;
 
 	/* A waiter object for updating the slab summary */
-	struct waiter summary_waiter;
+	struct vdo_waiter summary_waiter;
 
 	/* The latest slab journal for which there has been a reference count update */
 	struct journal_point slab_journal_point;
@@ -271,7 +271,7 @@ struct slab_scrubber {
 	/* The queue of slabs to scrub once there are no high_priority_slabs */
 	struct list_head slabs;
 	/* The queue of VIOs waiting for a slab to be scrubbed */
-	struct wait_queue waiters;
+	struct vdo_wait_queue waiters;
 
 	/*
 	 * The number of slabs that are unrecovered or being scrubbed. This field is modified by
@@ -341,9 +341,9 @@ struct slab_summary_block {
 	/* Whether this block has a write outstanding */
 	bool writing;
 	/* Ring of updates waiting on the outstanding write */
-	struct wait_queue current_update_waiters;
+	struct vdo_wait_queue current_update_waiters;
 	/* Ring of updates waiting on the next write */
-	struct wait_queue next_update_waiters;
+	struct vdo_wait_queue next_update_waiters;
 	/* The active slab_summary_entry array for this block */
 	struct slab_summary_entry *entries;
 	/* The vio used to write this block */
@@ -522,7 +522,7 @@ int __must_check vdo_allocate_block(struct block_allocator *allocator,
 				    physical_block_number_t *block_number_ptr);
 
 int vdo_enqueue_clean_slab_waiter(struct block_allocator *allocator,
-				  struct waiter *waiter);
+				  struct vdo_waiter *waiter);
 
 void vdo_modify_reference_count(struct vdo_completion *completion,
 				struct reference_updater *updater);
...
@@ -25,7 +25,7 @@ struct vio_pool {
 	/** The list of objects which are available */
 	struct list_head available;
 	/** The queue of requestors waiting for objects from the pool */
-	struct wait_queue waiting;
+	struct vdo_wait_queue waiting;
 	/** The number of objects currently in use */
 	size_t busy_count;
 	/** The list of objects which are in use */
@@ -364,7 +364,7 @@ void free_vio_pool(struct vio_pool *pool)
 		return;
 
 	/* Remove all available vios from the object pool. */
-	ASSERT_LOG_ONLY(!vdo_has_waiters(&pool->waiting),
+	ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&pool->waiting),
			"VIO pool must not have any waiters when being freed");
 	ASSERT_LOG_ONLY((pool->busy_count == 0),
			"VIO pool must not have %zu busy entries when being freed",
@@ -400,7 +400,7 @@ bool is_vio_pool_busy(struct vio_pool *pool)
 * @pool: The vio pool.
 * @waiter: Object that is requesting a vio.
 */
-void acquire_vio_from_pool(struct vio_pool *pool, struct waiter *waiter)
+void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter)
 {
 	struct pooled_vio *pooled;
 
@@ -408,7 +408,7 @@ void acquire_vio_from_pool(struct vio_pool *pool, struct waiter *waiter)
 			"acquire from active vio_pool called from correct thread");
 
 	if (list_empty(&pool->available)) {
-		vdo_enqueue_waiter(&pool->waiting, waiter);
+		vdo_waitq_enqueue_waiter(&pool->waiting, waiter);
 		return;
 	}
@@ -430,8 +430,8 @@ void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio)
 	vio->vio.completion.error_handler = NULL;
 	vio->vio.completion.parent = NULL;
 
-	if (vdo_has_waiters(&pool->waiting)) {
-		vdo_notify_next_waiter(&pool->waiting, NULL, vio);
+	if (vdo_waitq_has_waiters(&pool->waiting)) {
+		vdo_waitq_notify_next_waiter(&pool->waiting, NULL, vio);
 		return;
 	}
...
@@ -193,7 +193,7 @@ int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t th
 			       void *context, struct vio_pool **pool_ptr);
 void free_vio_pool(struct vio_pool *pool);
 bool __must_check is_vio_pool_busy(struct vio_pool *pool);
-void acquire_vio_from_pool(struct vio_pool *pool, struct waiter *waiter);
+void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter);
 void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio);
 
 #endif /* VIO_H */
...
...@@ -10,122 +10,132 @@ ...@@ -10,122 +10,132 @@
#include <linux/types.h> #include <linux/types.h>
/** /**
* DOC: Wait queues. * A vdo_wait_queue is a circular singly linked list of entries waiting to be notified
* of a change in a condition. Keeping a circular list allows the vdo_wait_queue
* structure to simply be a pointer to the tail (newest) entry, supporting
* constant-time enqueue and dequeue operations. A null pointer is an empty waitq.
* *
* A wait queue is a circular list of entries waiting to be notified of a change in a condition. * An empty waitq:
* Keeping a circular list allows the queue structure to simply be a pointer to the tail (newest) * waitq0.last_waiter -> NULL
* entry in the queue, supporting constant-time enqueue and dequeue operations. A null pointer is
* an empty queue.
* *
* An empty queue: * A singleton waitq:
* queue0.last_waiter -> NULL * waitq1.last_waiter -> entry1 -> entry1 -> [...]
* *
* A singleton queue: * A three-element waitq:
* queue1.last_waiter -> entry1 -> entry1 -> [...] * waitq2.last_waiter -> entry3 -> entry1 -> entry2 -> entry3 -> [...]
* *
* A three-element queue: * linux/wait.h's wait_queue_head is _not_ used because vdo_wait_queue's
* queue2.last_waiter -> entry3 -> entry1 -> entry2 -> entry3 -> [...] * interface is much less complex (doesn't need locking, priorities or timers).
 * This is made possible by vdo's thread-based resource allocation and
 * locking, and by the polling nature of vdo_wait_queue consumers.
*
 * FIXME: this could be made to use linux/list.h's list_head, but its extra
 * barriers really aren't needed. Nor is a doubly linked list, though
 * vdo_wait_queue could then make use of __list_del_clearprev() -- at the
 * cost of compromising the ability to make full use of linux's list interface.
*/ */
struct waiter; struct vdo_waiter;
struct wait_queue { struct vdo_wait_queue {
/* The tail of the queue, the last (most recently added) entry */ /* The tail of the queue, the last (most recently added) entry */
struct waiter *last_waiter; struct vdo_waiter *last_waiter;
/* The number of waiters currently in the queue */ /* The number of waiters currently in the queue */
size_t queue_length; size_t length;
}; };
/** /**
* typedef waiter_callback_fn - Callback type for functions which will be called to resume * vdo_waiter_callback_fn - Callback type that will be called to resume processing
* processing of a waiter after it has been removed from its wait * of a waiter after it has been removed from its wait queue.
* queue.
*/ */
typedef void (*waiter_callback_fn)(struct waiter *waiter, void *context); typedef void (*vdo_waiter_callback_fn)(struct vdo_waiter *waiter, void *context);
/** /**
* typedef waiter_match_fn - Method type for waiter matching methods. * vdo_waiter_match_fn - Method type for waiter matching methods.
* *
* A waiter_match_fn method returns false if the waiter does not match. * Returns false if the waiter does not match.
*/ */
typedef bool (*waiter_match_fn)(struct waiter *waiter, void *context); typedef bool (*vdo_waiter_match_fn)(struct vdo_waiter *waiter, void *context);
/* The queue entry structure for entries in a wait_queue. */ /* The structure for entries in a vdo_wait_queue. */
struct waiter { struct vdo_waiter {
/* /*
* The next waiter in the queue. If this entry is the last waiter, then this is actually a * The next waiter in the waitq. If this entry is the last waiter, then this
* pointer back to the head of the queue. * is actually a pointer back to the head of the waitq.
*/ */
struct waiter *next_waiter; struct vdo_waiter *next_waiter;
/* Optional waiter-specific callback to invoke when waking this waiter. */ /* Optional waiter-specific callback to invoke when dequeuing this waiter. */
waiter_callback_fn callback; vdo_waiter_callback_fn callback;
}; };
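The tail-pointer layout described in the DOC comment is what makes both ends of the waitq O(1): the newest entry's next_waiter always points back at the oldest, so the head is reachable from the tail in one hop. A sketch of the enqueue side under that invariant; the committed vdo_waitq_enqueue_waiter() body lives in the .c file, so treat this as illustrative rather than authoritative:

void vdo_waitq_enqueue_waiter(struct vdo_wait_queue *waitq,
			      struct vdo_waiter *waiter)
{
	if (waitq->last_waiter == NULL) {
		/* Empty waitq: the lone entry circles back to itself. */
		waiter->next_waiter = waiter;
	} else {
		/* Splice in behind the tail; the head stays one hop away. */
		waiter->next_waiter = waitq->last_waiter->next_waiter;
		waitq->last_waiter->next_waiter = waiter;
	}
	waitq->last_waiter = waiter;
	waitq->length++;
}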
/** /**
* is_waiting() - Check whether a waiter is waiting. * vdo_waiter_is_waiting() - Check whether a waiter is waiting.
* @waiter: The waiter to check. * @waiter: The waiter to check.
* *
* Return: true if the waiter is on some wait_queue. * Return: true if the waiter is on some vdo_wait_queue.
*/ */
static inline bool vdo_is_waiting(struct waiter *waiter) static inline bool vdo_waiter_is_waiting(struct vdo_waiter *waiter)
{ {
return (waiter->next_waiter != NULL); return (waiter->next_waiter != NULL);
} }
/** /**
* initialize_wait_queue() - Initialize a wait queue. * vdo_waitq_init() - Initialize a vdo_wait_queue.
* @queue: The queue to initialize. * @waitq: The vdo_wait_queue to initialize.
*/ */
static inline void vdo_initialize_wait_queue(struct wait_queue *queue) static inline void vdo_waitq_init(struct vdo_wait_queue *waitq)
{ {
*queue = (struct wait_queue) { *waitq = (struct vdo_wait_queue) {
.last_waiter = NULL, .last_waiter = NULL,
.queue_length = 0, .length = 0,
}; };
} }
/** /**
* has_waiters() - Check whether a wait queue has any entries waiting in it. * vdo_waitq_has_waiters() - Check whether a vdo_wait_queue has any entries waiting.
* @queue: The queue to query. * @waitq: The vdo_wait_queue to query.
* *
* Return: true if there are any waiters in the queue. * Return: true if there are any waiters in the waitq.
*/ */
static inline bool __must_check vdo_has_waiters(const struct wait_queue *queue) static inline bool __must_check vdo_waitq_has_waiters(const struct vdo_wait_queue *waitq)
{ {
return (queue->last_waiter != NULL); return (waitq->last_waiter != NULL);
} }
void vdo_enqueue_waiter(struct wait_queue *queue, struct waiter *waiter); void vdo_waitq_enqueue_waiter(struct vdo_wait_queue *waitq,
struct vdo_waiter *waiter);
void vdo_notify_all_waiters(struct wait_queue *queue, waiter_callback_fn callback, void vdo_waitq_notify_all_waiters(struct vdo_wait_queue *waitq,
void *context); vdo_waiter_callback_fn callback, void *context);
bool vdo_notify_next_waiter(struct wait_queue *queue, waiter_callback_fn callback, bool vdo_waitq_notify_next_waiter(struct vdo_wait_queue *waitq,
void *context); vdo_waiter_callback_fn callback, void *context);
void vdo_transfer_all_waiters(struct wait_queue *from_queue, void vdo_waitq_transfer_all_waiters(struct vdo_wait_queue *from_waitq,
struct wait_queue *to_queue); struct vdo_wait_queue *to_waitq);
struct waiter *vdo_get_first_waiter(const struct wait_queue *queue); struct vdo_waiter *vdo_waitq_get_first_waiter(const struct vdo_wait_queue *waitq);
void vdo_dequeue_matching_waiters(struct wait_queue *queue, waiter_match_fn match_method, void vdo_waitq_dequeue_matching_waiters(struct vdo_wait_queue *waitq,
void *match_context, struct wait_queue *matched_queue); vdo_waiter_match_fn waiter_match,
void *match_context,
struct vdo_wait_queue *matched_waitq);
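vdo_waitq_dequeue_matching_waiters() pairs with the vdo_waiter_match_fn typedef above: each waiter for which the match function returns true is moved onto matched_waitq. A hypothetical match function; the lbn_waiter wrapper and its lbn field are invented for illustration:

#include <linux/container_of.h>

struct lbn_waiter {
	struct vdo_waiter waiter;
	logical_block_number_t lbn;	/* hypothetical per-waiter key */
};

/* Returns true for waiters blocked on the lbn passed as match_context. */
static bool matches_lbn(struct vdo_waiter *waiter, void *context)
{
	struct lbn_waiter *entry = container_of(waiter, struct lbn_waiter, waiter);

	return entry->lbn == *((logical_block_number_t *) context);
}

A call would then look like: vdo_waitq_dequeue_matching_waiters(&waitq, matches_lbn, &lbn, &matched);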
struct waiter *vdo_dequeue_next_waiter(struct wait_queue *queue); struct vdo_waiter *vdo_waitq_dequeue_next_waiter(struct vdo_wait_queue *waitq);
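Dequeue pops the head (the tail's next_waiter) and, crucially, nulls the popped entry's next_waiter so that vdo_waiter_is_waiting() reads false again. An illustrative counterpart to the enqueue sketch above, not the committed implementation:

struct vdo_waiter *vdo_waitq_dequeue_next_waiter(struct vdo_wait_queue *waitq)
{
	struct vdo_waiter *first;

	if (waitq->last_waiter == NULL)
		return NULL;

	first = waitq->last_waiter->next_waiter;
	if (first == waitq->last_waiter)
		waitq->last_waiter = NULL;	/* Singleton: waitq is now empty. */
	else
		waitq->last_waiter->next_waiter = first->next_waiter;

	/* The entry is no longer waiting (see vdo_waiter_is_waiting()). */
	first->next_waiter = NULL;
	waitq->length--;
	return first;
}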
/** /**
* count_waiters() - Count the number of waiters in a wait queue. * vdo_waitq_num_waiters() - Return the number of waiters in a vdo_wait_queue.
* @queue: The wait queue to query. * @waitq: The vdo_wait_queue to query.
* *
* Return: The number of waiters in the queue. * Return: The number of waiters in the waitq.
*/ */
static inline size_t __must_check vdo_count_waiters(const struct wait_queue *queue) static inline size_t __must_check vdo_waitq_num_waiters(const struct vdo_wait_queue *waitq)
{ {
return queue->queue_length; return waitq->length;
} }
const struct waiter * __must_check vdo_get_next_waiter(const struct wait_queue *queue, const struct vdo_waiter * __must_check
const struct waiter *waiter); vdo_waitq_get_next_waiter(const struct vdo_wait_queue *waitq, const struct vdo_waiter *waiter);
#endif /* VDO_WAIT_QUEUE_H */ #endif /* VDO_WAIT_QUEUE_H */