Commit 3f607293 authored by John Garry's avatar John Garry Committed by Jens Axboe

sbitmap: Delete old sbitmap_queue_get_shallow()

Since __sbitmap_queue_get_shallow() was introduced in commit c05e6673
("sbitmap: add sbitmap_get_shallow() operation"), it has not been used.

Delete the unused public sbitmap_queue_get_shallow() and rename
__sbitmap_queue_get_shallow() -> sbitmap_queue_get_shallow(), as it is odd
to have a public __foo but no foo at all.
Signed-off-by: John Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/1644322024-105340-1-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3301bc53
...@@ -107,7 +107,7 @@ static int __blk_mq_get_tag(struct blk_mq_alloc_data *data, ...@@ -107,7 +107,7 @@ static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
return BLK_MQ_NO_TAG; return BLK_MQ_NO_TAG;
if (data->shallow_depth) if (data->shallow_depth)
return __sbitmap_queue_get_shallow(bt, data->shallow_depth); return sbitmap_queue_get_shallow(bt, data->shallow_depth);
else else
return __sbitmap_queue_get(bt); return __sbitmap_queue_get(bt);
} }
......
...@@ -135,7 +135,7 @@ struct sbitmap_queue { ...@@ -135,7 +135,7 @@ struct sbitmap_queue {
/** /**
* @min_shallow_depth: The minimum shallow depth which may be passed to * @min_shallow_depth: The minimum shallow depth which may be passed to
* sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow(). * sbitmap_queue_get_shallow()
*/ */
unsigned int min_shallow_depth; unsigned int min_shallow_depth;
}; };
...@@ -463,7 +463,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, ...@@ -463,7 +463,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
unsigned int *offset); unsigned int *offset);
/** /**
* __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
* sbitmap_queue, limiting the depth used from each word, with preemption * sbitmap_queue, limiting the depth used from each word, with preemption
* already disabled. * already disabled.
* @sbq: Bitmap queue to allocate from. * @sbq: Bitmap queue to allocate from.
...@@ -475,8 +475,8 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, ...@@ -475,8 +475,8 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
* *
* Return: Non-negative allocated bit number if successful, -1 otherwise. * Return: Non-negative allocated bit number if successful, -1 otherwise.
*/ */
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
unsigned int shallow_depth); unsigned int shallow_depth);
/** /**
* sbitmap_queue_get() - Try to allocate a free bit from a &struct * sbitmap_queue_get() - Try to allocate a free bit from a &struct
...@@ -498,32 +498,6 @@ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq, ...@@ -498,32 +498,6 @@ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
return nr; return nr;
} }
/**
* sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
* sbitmap_queue, limiting the depth used from each word.
* @sbq: Bitmap queue to allocate from.
* @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
* sbitmap_queue_clear()).
* @shallow_depth: The maximum number of bits to allocate from a single word.
* See sbitmap_get_shallow().
*
* If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
* initializing @sbq.
*
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
unsigned int *cpu,
unsigned int shallow_depth)
{
int nr;
*cpu = get_cpu();
nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
put_cpu();
return nr;
}
/** /**
* sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
* minimum shallow depth that will be used. * minimum shallow depth that will be used.
......
...@@ -557,14 +557,14 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, ...@@ -557,14 +557,14 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
return 0; return 0;
} }
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
unsigned int shallow_depth) unsigned int shallow_depth)
{ {
WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth); WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
return sbitmap_get_shallow(&sbq->sb, shallow_depth); return sbitmap_get_shallow(&sbq->sb, shallow_depth);
} }
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow); EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq, void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
unsigned int min_shallow_depth) unsigned int min_shallow_depth)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment