Commit ffb8d965 authored by Bruce Johnston, committed by Mike Snitzer

dm vdo int-map: rename functions to use a common vdo_int_map preamble

Reviewed-by: Matthew Sakai <msakai@redhat.com>
Signed-off-by: Bruce Johnston <bjohnsto@redhat.com>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent db6b0a7f
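
In short: vdo_make_int_map() becomes vdo_int_map_create() and vdo_free_int_map() becomes vdo_int_map_free(), so every exported int_map function now carries the vdo_int_map_ prefix. All callers are updated to match, and a few declarations are re-wrapped to fit the longer names.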
@@ -232,7 +232,7 @@ static int __must_check allocate_cache_components(struct vdo_page_cache *cache)
 	if (result != UDS_SUCCESS)
 		return result;
 
-	result = vdo_make_int_map(cache->page_count, 0, &cache->page_map);
+	result = vdo_int_map_create(cache->page_count, 0, &cache->page_map);
 	if (result != UDS_SUCCESS)
 		return result;
@@ -1346,8 +1346,8 @@ int vdo_invalidate_page_cache(struct vdo_page_cache *cache)
 	}
 
 	/* Reset the page map by re-allocating it. */
-	vdo_free_int_map(uds_forget(cache->page_map));
-	return vdo_make_int_map(cache->page_count, 0, &cache->page_map);
+	vdo_int_map_free(uds_forget(cache->page_map));
+	return vdo_int_map_create(cache->page_count, 0, &cache->page_map);
 }
 
 /**
@@ -2751,7 +2751,7 @@ static int __must_check initialize_block_map_zone(struct block_map *map,
 			INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_CACHE_PAGE]);
 	}
 
-	result = vdo_make_int_map(VDO_LOCK_MAP_CAPACITY, 0, &zone->loading_pages);
+	result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, 0, &zone->loading_pages);
 	if (result != VDO_SUCCESS)
 		return result;
@@ -2831,7 +2831,7 @@ static void uninitialize_block_map_zone(struct block_map_zone *zone)
 	uds_free(uds_forget(zone->dirty_lists));
 	free_vio_pool(uds_forget(zone->vio_pool));
-	vdo_free_int_map(uds_forget(zone->loading_pages));
+	vdo_int_map_free(uds_forget(zone->loading_pages));
 
 	if (cache->infos != NULL) {
 		struct page_info *info;
@@ -2839,7 +2839,7 @@ static void uninitialize_block_map_zone(struct block_map_zone *zone)
 			free_vio(uds_forget(info->vio));
 	}
 
-	vdo_free_int_map(uds_forget(cache->page_map));
+	vdo_int_map_free(uds_forget(cache->page_map));
 	uds_free(uds_forget(cache->infos));
 	uds_free(uds_forget(cache->pages));
 }

@@ -2404,7 +2404,7 @@ static int __must_check initialize_zone(struct vdo *vdo, struct hash_zones *zone
 	data_vio_count_t i;
 	struct hash_zone *zone = &zones->zones[zone_number];
 
-	result = vdo_make_int_map(VDO_LOCK_MAP_CAPACITY, 0, &zone->hash_lock_map);
+	result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, 0, &zone->hash_lock_map);
 	if (result != VDO_SUCCESS)
 		return result;
@@ -2528,7 +2528,7 @@ void vdo_free_hash_zones(struct hash_zones *zones)
 		struct hash_zone *zone = &zones->zones[i];
 
 		uds_free_funnel_queue(uds_forget(zone->timed_out_complete));
-		vdo_free_int_map(uds_forget(zone->hash_lock_map));
+		vdo_int_map_free(uds_forget(zone->hash_lock_map));
 		uds_free(uds_forget(zone->lock_array));
 	}

@@ -171,7 +171,7 @@ static int allocate_buckets(struct int_map *map, size_t capacity)
 }
 
 /**
- * vdo_make_int_map() - Allocate and initialize an int_map.
+ * vdo_int_map_create() - Allocate and initialize an int_map.
  * @initial_capacity: The number of entries the map should initially be capable of holding (zero
  *		      tells the map to use its own small default).
  * @initial_load: The load factor of the map, expressed as an integer percentage (typically in the
@@ -180,7 +180,8 @@ static int allocate_buckets(struct int_map *map, size_t capacity)
  *
 * Return: UDS_SUCCESS or an error code.
 */
-int vdo_make_int_map(size_t initial_capacity, unsigned int initial_load, struct int_map **map_ptr)
+int vdo_int_map_create(size_t initial_capacity, unsigned int initial_load,
+		       struct int_map **map_ptr)
 {
 	struct int_map *map;
 	int result;
@@ -207,7 +208,7 @@ int vdo_make_int_map(size_t initial_capacity, unsigned int initial_load, struct
 	result = allocate_buckets(map, capacity);
 	if (result != UDS_SUCCESS) {
-		vdo_free_int_map(uds_forget(map));
+		vdo_int_map_free(uds_forget(map));
 		return result;
 	}
@@ -216,13 +217,13 @@ int vdo_make_int_map(size_t initial_capacity, unsigned int initial_load, struct
 }
 
 /**
- * vdo_free_int_map() - Free an int_map.
+ * vdo_int_map_free() - Free an int_map.
  * @map: The int_map to free.
  *
 * NOTE: The map does not own the pointer values stored in the map and they are not freed by this
 * call.
 */
-void vdo_free_int_map(struct int_map *map)
+void vdo_int_map_free(struct int_map *map)
 {
 	if (map == NULL)
 		return;
@@ -464,7 +465,8 @@ find_empty_bucket(struct int_map *map, struct bucket *bucket, unsigned int max_p
 * Return: The bucket that was vacated by moving its entry to the provided hole, or NULL if no
 *	   entry could be moved.
 */
-static struct bucket *move_empty_bucket(struct int_map *map __always_unused, struct bucket *hole)
+static struct bucket *move_empty_bucket(struct int_map *map __always_unused,
+					struct bucket *hole)
 {
	/*
	 * Examine every neighborhood that the empty bucket is part of, starting with the one in
@@ -572,7 +574,8 @@ static bool update_mapping(struct int_map *map,
 * Return: a pointer to an empty bucket in the desired neighborhood, or NULL if a vacancy could not
 *	   be found or arranged.
 */
-static struct bucket *find_or_make_vacancy(struct int_map *map, struct bucket *neighborhood)
+static struct bucket *find_or_make_vacancy(struct int_map *map,
+					   struct bucket *neighborhood)
 {
	/* Probe within and beyond the neighborhood for the first empty bucket. */
	struct bucket *hole = find_empty_bucket(map, neighborhood, MAX_PROBES);
@@ -619,7 +622,8 @@ static struct bucket *find_or_make_vacancy(struct int_map *map, struct bucket *n
 *
 * Return: UDS_SUCCESS or an error code.
 */
-int vdo_int_map_put(struct int_map *map, u64 key, void *new_value, bool update, void **old_value_ptr)
+int vdo_int_map_put(struct int_map *map, u64 key, void *new_value, bool update,
+		    void **old_value_ptr)
 {
	struct bucket *neighborhood, *bucket;

@@ -23,17 +23,17 @@
 struct int_map;
 
-int __must_check
-vdo_make_int_map(size_t initial_capacity, unsigned int initial_load, struct int_map **map_ptr);
+int __must_check vdo_int_map_create(size_t initial_capacity, unsigned int initial_load,
+				    struct int_map **map_ptr);
 
-void vdo_free_int_map(struct int_map *map);
+void vdo_int_map_free(struct int_map *map);
 
 size_t vdo_int_map_size(const struct int_map *map);
 
 void *vdo_int_map_get(struct int_map *map, u64 key);
 
-int __must_check
-vdo_int_map_put(struct int_map *map, u64 key, void *new_value, bool update, void **old_value_ptr);
+int __must_check vdo_int_map_put(struct int_map *map, u64 key, void *new_value,
+				 bool update, void **old_value_ptr);
 
 void *vdo_int_map_remove(struct int_map *map, u64 key);

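For orientation, here is a minimal sketch of the renamed interface in use. Only the vdo_int_map_* declarations above come from this patch; the function demo_int_map_usage(), the demo_object value, and the key 42 are hypothetical, and the sketch assumes UDS_SUCCESS is in scope as it is in the int_map sources.

static int demo_int_map_usage(void)
{
	static int demo_object;		/* stands in for a caller-owned value */
	struct int_map *map;
	void *old_value;
	int result;

	/* Zero capacity and zero load select the map's built-in defaults. */
	result = vdo_int_map_create(0, 0, &map);
	if (result != UDS_SUCCESS)
		return result;

	/* Map key 42 to the object; 'false' means do not replace an existing mapping. */
	result = vdo_int_map_put(map, 42, &demo_object, false, &old_value);
	if (result != UDS_SUCCESS) {
		vdo_int_map_free(map);
		return result;
	}

	/* Look the value up, then detach it again; neither call frees the value itself. */
	if (vdo_int_map_get(map, 42) == &demo_object)
		vdo_int_map_remove(map, 42);

	/* Freeing the map does not free the values that were stored in it. */
	vdo_int_map_free(map);
	return UDS_SUCCESS;
}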
@@ -401,8 +401,8 @@ int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_inter
 		 * uneven. So for now, we'll assume that all requests *may* wind up on one thread,
 		 * and thus all in the same map.
 		 */
-		result = vdo_make_int_map(max_requests_active * 2, 0,
-					  &bio_queue_data->map);
+		result = vdo_int_map_create(max_requests_active * 2, 0,
+					    &bio_queue_data->map);
 		if (result != 0) {
 			/*
 			 * Clean up the partially initialized bio-queue entirely and indicate that
@@ -422,7 +422,7 @@ int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_inter
 			 * Clean up the partially initialized bio-queue entirely and indicate that
 			 * initialization failed.
 			 */
-			vdo_free_int_map(uds_forget(bio_queue_data->map));
+			vdo_int_map_free(uds_forget(bio_queue_data->map));
 			uds_log_error("bio queue initialization failed %d", result);
 			vdo_cleanup_io_submitter(io_submitter);
 			vdo_free_io_submitter(io_submitter);
@@ -471,7 +471,7 @@ void vdo_free_io_submitter(struct io_submitter *io_submitter)
 		io_submitter->num_bio_queues_used--;
 		/* vdo_destroy() will free the work queue, so just give up our reference to it. */
 		uds_forget(io_submitter->bio_queue_data[i].queue);
-		vdo_free_int_map(uds_forget(io_submitter->bio_queue_data[i].map));
+		vdo_int_map_free(uds_forget(io_submitter->bio_queue_data[i].map));
 	}
 	uds_free(io_submitter);
 }

@@ -57,7 +57,7 @@ static int initialize_zone(struct logical_zones *zones, zone_count_t zone_number
 	struct logical_zone *zone = &zones->zones[zone_number];
 	zone_count_t allocation_zone_number;
 
-	result = vdo_make_int_map(VDO_LOCK_MAP_CAPACITY, 0, &zone->lbn_operations);
+	result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, 0, &zone->lbn_operations);
 	if (result != VDO_SUCCESS)
 		return result;
@@ -137,7 +137,7 @@ void vdo_free_logical_zones(struct logical_zones *zones)
 	uds_free(uds_forget(zones->manager));
 
 	for (index = 0; index < zones->zone_count; index++)
-		vdo_free_int_map(uds_forget(zones->zones[index].lbn_operations));
+		vdo_int_map_free(uds_forget(zones->zones[index].lbn_operations));
 
 	uds_free(zones);
 }

@@ -330,13 +330,13 @@ static int initialize_zone(struct vdo *vdo, struct physical_zones *zones)
 	zone_count_t zone_number = zones->zone_count;
 	struct physical_zone *zone = &zones->zones[zone_number];
 
-	result = vdo_make_int_map(VDO_LOCK_MAP_CAPACITY, 0, &zone->pbn_operations);
+	result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, 0, &zone->pbn_operations);
 	if (result != VDO_SUCCESS)
 		return result;
 
 	result = make_pbn_lock_pool(LOCK_POOL_CAPACITY, &zone->lock_pool);
 	if (result != VDO_SUCCESS) {
-		vdo_free_int_map(zone->pbn_operations);
+		vdo_int_map_free(zone->pbn_operations);
 		return result;
 	}
@@ -347,7 +347,7 @@ static int initialize_zone(struct vdo *vdo, struct physical_zones *zones)
 	result = vdo_make_default_thread(vdo, zone->thread_id);
 	if (result != VDO_SUCCESS) {
 		free_pbn_lock_pool(uds_forget(zone->lock_pool));
-		vdo_free_int_map(zone->pbn_operations);
+		vdo_int_map_free(zone->pbn_operations);
 		return result;
 	}
 
 	return result;
@@ -401,7 +401,7 @@ void vdo_free_physical_zones(struct physical_zones *zones)
 		struct physical_zone *zone = &zones->zones[index];
 
 		free_pbn_lock_pool(uds_forget(zone->lock_pool));
-		vdo_free_int_map(uds_forget(zone->pbn_operations));
+		vdo_int_map_free(uds_forget(zone->pbn_operations));
 	}
 
 	uds_free(zones);