Commit d72d827f authored by Mingzhe Zou, committed by Martin K. Petersen

scsi: target: Add iscsi/cpus_allowed_list in configfs

The RX/TX threads for an iSCSI connection can be scheduled to any online
CPU, and are never rescheduled afterwards.

When other heavily loaded threads are bound to the same CPU as a
connection's RX/TX threads, iSCSI performance degrades.

Add iscsi/cpus_allowed_list in configfs. The CPU set actually available to
an iSCSI connection's RX/TX threads is allowed_cpus & online_cpus. Whenever
the attribute is modified, all RX/TX threads are rescheduled.
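
For example (a hypothetical session, not part of the commit; the paths
assume configfs is mounted at /sys/kernel/config and the iSCSI fabric
directory has been created, on an imagined 16-CPU machine):

  # cat /sys/kernel/config/target/iscsi/cpus_allowed_list
  0-15
  # echo 0-3 > /sys/kernel/config/target/iscsi/cpus_allowed_list
  # cat /sys/kernel/config/target/iscsi/cpus_allowed_list
  0-3

The mask defaults to all CPUs, and the value written must be a cpulist as
accepted by cpulist_parse() (e.g. "0-3" or "0,2,4-7").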

Link: https://lore.kernel.org/r/20220301075500.14266-1-mingzhe.zou@easystack.cn
Reviewed-by: Mike Christie <michael.christie@oracle.com>
Signed-off-by: Mingzhe Zou <mingzhe.zou@easystack.cn>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 095478a6
@@ -702,13 +702,19 @@ static int __init iscsi_target_init_module(void)
 	if (!iscsit_global->ts_bitmap)
 		goto configfs_out;
 
+	if (!zalloc_cpumask_var(&iscsit_global->allowed_cpumask, GFP_KERNEL)) {
+		pr_err("Unable to allocate iscsit_global->allowed_cpumask\n");
+		goto bitmap_out;
+	}
+	cpumask_setall(iscsit_global->allowed_cpumask);
+
 	lio_qr_cache = kmem_cache_create("lio_qr_cache",
 			sizeof(struct iscsi_queue_req),
 			__alignof__(struct iscsi_queue_req), 0, NULL);
 	if (!lio_qr_cache) {
 		pr_err("Unable to kmem_cache_create() for"
 				" lio_qr_cache\n");
-		goto bitmap_out;
+		goto cpumask_out;
 	}
 
 	lio_dr_cache = kmem_cache_create("lio_dr_cache",
@@ -753,6 +759,8 @@ static int __init iscsi_target_init_module(void)
 	kmem_cache_destroy(lio_dr_cache);
 qr_out:
 	kmem_cache_destroy(lio_qr_cache);
+cpumask_out:
+	free_cpumask_var(iscsit_global->allowed_cpumask);
 bitmap_out:
 	vfree(iscsit_global->ts_bitmap);
 configfs_out:
@@ -782,6 +790,7 @@ static void __exit iscsi_target_cleanup_module(void)
 
 	target_unregister_template(&iscsi_ops);
 
+	free_cpumask_var(iscsit_global->allowed_cpumask);
 	vfree(iscsit_global->ts_bitmap);
 	kfree(iscsit_global);
 }
@@ -3587,6 +3596,11 @@ static int iscsit_send_reject(
 void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
 {
 	int ord, cpu;
+	cpumask_t conn_allowed_cpumask;
+
+	cpumask_and(&conn_allowed_cpumask, iscsit_global->allowed_cpumask,
+			cpu_online_mask);
+
 	/*
 	 * bitmap_id is assigned from iscsit_global->ts_bitmap from
 	 * within iscsit_start_kthreads()
@@ -3595,8 +3609,9 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
 	 * iSCSI connection's RX/TX threads will be scheduled to
 	 * execute upon.
 	 */
-	ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
-	for_each_online_cpu(cpu) {
+	cpumask_clear(conn->conn_cpumask);
+	ord = conn->bitmap_id % cpumask_weight(&conn_allowed_cpumask);
+	for_each_cpu(cpu, &conn_allowed_cpumask) {
 		if (ord-- == 0) {
 			cpumask_set_cpu(cpu, conn->conn_cpumask);
 			return;
@@ -3609,6 +3624,62 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
 	cpumask_setall(conn->conn_cpumask);
 }
 
+static void iscsit_thread_reschedule(struct iscsi_conn *conn)
+{
+	/*
+	 * If iscsit_global->allowed_cpumask modified, reschedule iSCSI
+	 * connection's RX/TX threads update conn->allowed_cpumask.
+	 */
+	if (!cpumask_equal(iscsit_global->allowed_cpumask,
+			   conn->allowed_cpumask)) {
+		iscsit_thread_get_cpumask(conn);
+		conn->conn_tx_reset_cpumask = 1;
+		conn->conn_rx_reset_cpumask = 1;
+		cpumask_copy(conn->allowed_cpumask,
+			     iscsit_global->allowed_cpumask);
+	}
+}
+
+void iscsit_thread_check_cpumask(
+	struct iscsi_conn *conn,
+	struct task_struct *p,
+	int mode)
+{
+	/*
+	 * The TX and RX threads maybe call iscsit_thread_check_cpumask()
+	 * at the same time. The RX thread might be faster and return from
+	 * iscsit_thread_reschedule() with conn_rx_reset_cpumask set to 0.
+	 * Then the TX thread sets it back to 1.
+	 * The next time the RX thread loops, it sees conn_rx_reset_cpumask
+	 * set to 1 and calls set_cpus_allowed_ptr() again and set it to 0.
+	 */
+	iscsit_thread_reschedule(conn);
+
+	/*
+	 * mode == 1 signals iscsi_target_tx_thread() usage.
+	 * mode == 0 signals iscsi_target_rx_thread() usage.
+	 */
+	if (mode == 1) {
+		if (!conn->conn_tx_reset_cpumask)
+			return;
+	} else {
+		if (!conn->conn_rx_reset_cpumask)
+			return;
+	}
+
+	/*
+	 * Update the CPU mask for this single kthread so that
+	 * both TX and RX kthreads are scheduled to run on the
+	 * same CPU.
+	 */
+	set_cpus_allowed_ptr(p, conn->conn_cpumask);
+
+	if (mode == 1)
+		conn->conn_tx_reset_cpumask = 0;
+	else
+		conn->conn_rx_reset_cpumask = 0;
+}
+EXPORT_SYMBOL(iscsit_thread_check_cpumask);
+
 int
 iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 {
...
@@ -1127,8 +1127,40 @@ static ssize_t lio_target_wwn_lio_version_show(struct config_item *item,
 
 CONFIGFS_ATTR_RO(lio_target_wwn_, lio_version);
 
+static ssize_t lio_target_wwn_cpus_allowed_list_show(
+		struct config_item *item, char *page)
+{
+	return sprintf(page, "%*pbl\n",
+		       cpumask_pr_args(iscsit_global->allowed_cpumask));
+}
+
+static ssize_t lio_target_wwn_cpus_allowed_list_store(
+		struct config_item *item, const char *page, size_t count)
+{
+	int ret;
+	char *orig;
+	cpumask_t new_allowed_cpumask;
+
+	orig = kstrdup(page, GFP_KERNEL);
+	if (!orig)
+		return -ENOMEM;
+
+	cpumask_clear(&new_allowed_cpumask);
+	ret = cpulist_parse(orig, &new_allowed_cpumask);
+
+	kfree(orig);
+	if (ret != 0)
+		return ret;
+
+	cpumask_copy(iscsit_global->allowed_cpumask, &new_allowed_cpumask);
+	return count;
+}
+
+CONFIGFS_ATTR(lio_target_wwn_, cpus_allowed_list);
+
 static struct configfs_attribute *lio_target_wwn_attrs[] = {
 	&lio_target_wwn_attr_lio_version,
+	&lio_target_wwn_attr_cpus_allowed_list,
 	NULL,
 };
...
@@ -1129,8 +1129,15 @@ static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np)
 		goto free_conn_ops;
 	}
 
+	if (!zalloc_cpumask_var(&conn->allowed_cpumask, GFP_KERNEL)) {
+		pr_err("Unable to allocate conn->allowed_cpumask\n");
+		goto free_conn_cpumask;
+	}
+
 	return conn;
 
+free_conn_cpumask:
+	free_cpumask_var(conn->conn_cpumask);
 free_conn_ops:
 	kfree(conn->conn_ops);
 put_transport:
@@ -1142,6 +1149,7 @@ static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np)
 
 void iscsit_free_conn(struct iscsi_conn *conn)
 {
+	free_cpumask_var(conn->allowed_cpumask);
 	free_cpumask_var(conn->conn_cpumask);
 	kfree(conn->conn_ops);
 	iscsit_put_transport(conn->conn_transport);
...
@@ -580,6 +580,7 @@ struct iscsi_conn {
 	struct ahash_request	*conn_tx_hash;
 	/* Used for scheduling TX and RX connection kthreads */
 	cpumask_var_t		conn_cpumask;
+	cpumask_var_t		allowed_cpumask;
 	unsigned int		conn_rx_reset_cpumask:1;
 	unsigned int		conn_tx_reset_cpumask:1;
 	/* list_head of struct iscsi_cmd for this connection */
@@ -878,6 +879,7 @@ struct iscsit_global {
 	/* Thread Set bitmap pointer */
 	unsigned long		*ts_bitmap;
 	spinlock_t		ts_bitmap_lock;
+	cpumask_var_t		allowed_cpumask;
 	/* Used for iSCSI discovery session authentication */
 	struct iscsi_node_acl	discovery_acl;
 	struct iscsi_portal_group	*discovery_tpg;
@@ -898,29 +900,8 @@ static inline u32 session_get_next_ttt(struct iscsi_session *session)
 extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
 
-static inline void iscsit_thread_check_cpumask(
-	struct iscsi_conn *conn,
-	struct task_struct *p,
-	int mode)
-{
-	/*
-	 * mode == 1 signals iscsi_target_tx_thread() usage.
-	 * mode == 0 signals iscsi_target_rx_thread() usage.
-	 */
-	if (mode == 1) {
-		if (!conn->conn_tx_reset_cpumask)
-			return;
-		conn->conn_tx_reset_cpumask = 0;
-	} else {
-		if (!conn->conn_rx_reset_cpumask)
-			return;
-		conn->conn_rx_reset_cpumask = 0;
-	}
-	/*
-	 * Update the CPU mask for this single kthread so that
-	 * both TX and RX kthreads are scheduled to run on the
-	 * same CPU.
-	 */
-	set_cpus_allowed_ptr(p, conn->conn_cpumask);
-}
+extern void iscsit_thread_check_cpumask(struct iscsi_conn *conn,
+					struct task_struct *p,
+					int mode);
 
 #endif /* ISCSI_TARGET_CORE_H */
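
For reference, a minimal userspace sketch (not part of the commit) of the
selection logic iscsit_thread_get_cpumask() implements after this change: a
connection's bitmap_id indexes round-robin into the set bits of
allowed_cpus & online_cpus. Plain unsigned long bitmasks stand in for the
kernel's cpumask_t, and pick_cpu() is a hypothetical name.

#include <stdio.h>

/*
 * Return the (bitmap_id % weight)-th set bit of allowed & online, or -1
 * if the effective mask is empty (the kernel then falls back to all CPUs
 * via cpumask_setall()).
 */
static int pick_cpu(unsigned long allowed, unsigned long online, int bitmap_id)
{
	unsigned long eff = allowed & online;     /* allowed_cpus & online_cpus */
	int weight = __builtin_popcountl(eff);    /* cpumask_weight() analogue */
	int ord, cpu;

	if (!weight)
		return -1;

	ord = bitmap_id % weight;
	for (cpu = 0; cpu < (int)(8 * sizeof(eff)); cpu++) {
		if (!(eff & (1UL << cpu)))
			continue;                 /* for_each_cpu() analogue */
		if (ord-- == 0)
			return cpu;
	}
	return -1;	/* unreachable: weight > 0 guarantees a hit */
}

int main(void)
{
	/* allowed = CPUs 0-3, but only CPUs 0-1 are online: effective set {0,1} */
	printf("conn 0 -> CPU %d\n", pick_cpu(0xf, 0x3, 0)); /* CPU 0 */
	printf("conn 1 -> CPU %d\n", pick_cpu(0xf, 0x3, 1)); /* CPU 1 */
	printf("conn 2 -> CPU %d\n", pick_cpu(0xf, 0x3, 2)); /* wraps to CPU 0 */
	return 0;
}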