Commit 6f6b5d1e authored by Kent Overstreet, committed by Nicholas Bellinger

percpu_ida: Make percpu_ida_alloc + callers accept task state bitmask

This patch changes percpu_ida_alloc() + callers to accept task state
bitmask for prepare_to_wait() for code like target/iscsi that needs
it for interruptible sleep, that is provided in a subsequent patch.

It now expects TASK_UNINTERRUPTIBLE when the caller is able to sleep
waiting for a new tag, or TASK_RUNNING when the caller cannot sleep,
and is forced to return a negative value when no tags are available.

v2 changes:
  - Include blk-mq + tcm_fc + vhost/scsi + target/iscsi changes
  - Drop signal_pending_state() call
v3 changes:
  - Only call prepare_to_wait() + finish_wait() when != TASK_RUNNING
    (PeterZ)
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: <stable@vger.kernel.org> #3.12+
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent 4a4caa29
...@@ -36,7 +36,8 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp) ...@@ -36,7 +36,8 @@ static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp)
{ {
int tag; int tag;
tag = percpu_ida_alloc(&tags->free_tags, gfp); tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ?
TASK_UNINTERRUPTIBLE : TASK_RUNNING);
if (tag < 0) if (tag < 0)
return BLK_MQ_TAG_FAIL; return BLK_MQ_TAG_FAIL;
return tag + tags->nr_reserved_tags; return tag + tags->nr_reserved_tags;
...@@ -52,7 +53,8 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags, ...@@ -52,7 +53,8 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
return BLK_MQ_TAG_FAIL; return BLK_MQ_TAG_FAIL;
} }
tag = percpu_ida_alloc(&tags->reserved_tags, gfp); tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ?
TASK_UNINTERRUPTIBLE : TASK_RUNNING);
if (tag < 0) if (tag < 0)
return BLK_MQ_TAG_FAIL; return BLK_MQ_TAG_FAIL;
return tag; return tag;
......
...@@ -156,9 +156,13 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) ...@@ -156,9 +156,13 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
{ {
struct iscsi_cmd *cmd; struct iscsi_cmd *cmd;
struct se_session *se_sess = conn->sess->se_sess; struct se_session *se_sess = conn->sess->se_sess;
int size, tag; int size, tag, state = (gfp_mask & __GFP_WAIT) ? TASK_UNINTERRUPTIBLE :
TASK_RUNNING;
tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
if (tag < 0)
return NULL;
tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask);
size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size)); cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
memset(cmd, 0, size); memset(cmd, 0, size);
......
...@@ -438,7 +438,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) ...@@ -438,7 +438,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
struct se_session *se_sess = sess->se_sess; struct se_session *se_sess = sess->se_sess;
int tag; int tag;
tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC); tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0) if (tag < 0)
goto busy; goto busy;
......
...@@ -728,7 +728,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, ...@@ -728,7 +728,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq,
} }
se_sess = tv_nexus->tvn_se_sess; se_sess = tv_nexus->tvn_se_sess;
tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC); tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
if (tag < 0) { if (tag < 0) {
pr_err("Unable to obtain tag for tcm_vhost_cmd\n"); pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/sched.h>
#include <linux/spinlock_types.h> #include <linux/spinlock_types.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/cpumask.h> #include <linux/cpumask.h>
...@@ -61,7 +62,7 @@ struct percpu_ida { ...@@ -61,7 +62,7 @@ struct percpu_ida {
/* Max size of percpu freelist, */ /* Max size of percpu freelist, */
#define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2) #define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp); int percpu_ida_alloc(struct percpu_ida *pool, int state);
void percpu_ida_free(struct percpu_ida *pool, unsigned tag); void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
void percpu_ida_destroy(struct percpu_ida *pool); void percpu_ida_destroy(struct percpu_ida *pool);
......
...@@ -132,22 +132,22 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags) ...@@ -132,22 +132,22 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
/** /**
* percpu_ida_alloc - allocate a tag * percpu_ida_alloc - allocate a tag
* @pool: pool to allocate from * @pool: pool to allocate from
* @gfp: gfp flags * @state: task state for prepare_to_wait
* *
* Returns a tag - an integer in the range [0..nr_tags) (passed to * Returns a tag - an integer in the range [0..nr_tags) (passed to
* tag_pool_init()), or otherwise -ENOSPC on allocation failure. * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
* *
* Safe to be called from interrupt context (assuming it isn't passed * Safe to be called from interrupt context (assuming it isn't passed
* __GFP_WAIT, of course). * TASK_UNINTERRUPTIBLE, of course).
* *
* @gfp indicates whether or not to wait until a free id is available (it's not * @gfp indicates whether or not to wait until a free id is available (it's not
* used for internal memory allocations); thus if passed __GFP_WAIT we may sleep * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
* however long it takes until another thread frees an id (same semantics as a * however long it takes until another thread frees an id (same semantics as a
* mempool). * mempool).
* *
* Will not fail if passed __GFP_WAIT. * Will not fail if passed TASK_UNINTERRUPTIBLE.
*/ */
int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) int percpu_ida_alloc(struct percpu_ida *pool, int state)
{ {
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
struct percpu_ida_cpu *tags; struct percpu_ida_cpu *tags;
...@@ -174,7 +174,8 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) ...@@ -174,7 +174,8 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
* *
* global lock held and irqs disabled, don't need percpu lock * global lock held and irqs disabled, don't need percpu lock
*/ */
prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE); if (state != TASK_RUNNING)
prepare_to_wait(&pool->wait, &wait, state);
if (!tags->nr_free) if (!tags->nr_free)
alloc_global_tags(pool, tags); alloc_global_tags(pool, tags);
...@@ -191,7 +192,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) ...@@ -191,7 +192,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
spin_unlock(&pool->lock); spin_unlock(&pool->lock);
local_irq_restore(flags); local_irq_restore(flags);
if (tag >= 0 || !(gfp & __GFP_WAIT)) if (tag >= 0 || state == TASK_RUNNING)
break; break;
schedule(); schedule();
...@@ -199,8 +200,9 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) ...@@ -199,8 +200,9 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
local_irq_save(flags); local_irq_save(flags);
tags = this_cpu_ptr(pool->tag_cpu); tags = this_cpu_ptr(pool->tag_cpu);
} }
if (state != TASK_RUNNING)
finish_wait(&pool->wait, &wait);
finish_wait(&pool->wait, &wait);
return tag; return tag;
} }
EXPORT_SYMBOL_GPL(percpu_ida_alloc); EXPORT_SYMBOL_GPL(percpu_ida_alloc);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment