/*******************************************************************************
 * Filename:  target_core_tmr.c
 *
 * This file contains SPC-3 task management infrastructure
 *
 * (c) Copyright 2009-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/export.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"

int core_tmr_alloc_req(
40 41
	struct se_cmd *se_cmd,
	void *fabric_tmr_ptr,
42 43
	u8 function,
	gfp_t gfp_flags)
44 45 46
{
	struct se_tmr_req *tmr;

47
	tmr = kzalloc(sizeof(struct se_tmr_req), gfp_flags);
48 49
	if (!tmr) {
		pr_err("Unable to allocate struct se_tmr_req\n");
50
		return -ENOMEM;
51
	}
52 53 54

	se_cmd->se_cmd_flags |= SCF_SCSI_TMR_CDB;
	se_cmd->se_tmr_req = tmr;
55 56 57 58 59
	tmr->task_cmd = se_cmd;
	tmr->fabric_tmr_ptr = fabric_tmr_ptr;
	tmr->function = function;
	INIT_LIST_HEAD(&tmr->tmr_list);

60
	return 0;
61 62 63
}
EXPORT_SYMBOL(core_tmr_alloc_req);

void core_tmr_release_req(struct se_tmr_req *tmr)
65 66
{
	struct se_device *dev = tmr->tmr_dev;
67
	unsigned long flags;
68

69 70
	if (dev) {
		spin_lock_irqsave(&dev->se_tmr_lock, flags);
71
		list_del_init(&tmr->tmr_list);
72
		spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
73 74
	}

75
	kfree(tmr);
76 77 78 79 80
}

static void core_tmr_handle_tas_abort(
	struct se_node_acl *tmr_nacl,
	struct se_cmd *cmd,
81
	int tas)
82
{
83
	bool remove = true;
84 85
	/*
	 * TASK ABORTED status (TAS) bit support
86 87
	 */
	if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
88
		remove = false;
89
		transport_send_task_abort(cmd);
90
	}
91

92
	transport_cmd_finish_abort(cmd, remove);
93 94
}

static int target_check_cdb_and_preempt(struct list_head *list,
		struct se_cmd *cmd)
97
{
98
	struct t10_pr_registration *reg;
99

100 101 102 103
	if (!list)
		return 0;
	list_for_each_entry(reg, list, pr_reg_abort_list) {
		if (reg->pr_res_key == cmd->pr_res_key)
104 105 106 107 108 109
			return 0;
	}

	return 1;
}

/*
 * Check whether @se_cmd may still be aborted; if so, mark it
 * CMD_T_ABORTED and take an extra cmd_kref reference for the TMR path.
 *
 * Locking: caller must hold se_cmd->se_sess->sess_cmd_lock with hard
 * IRQs disabled -- both conditions are asserted below.
 *
 * Returns true when CMD_T_ABORTED was set AND a reference was obtained;
 * false when the command already reached CMD_T_COMPLETE or its cmd_kref
 * had already dropped to zero.  On false return the caller holds NO
 * additional reference.
 */
static bool __target_check_io_state(struct se_cmd *se_cmd)
{
	struct se_session *sess = se_cmd->se_sess;

	assert_spin_locked(&sess->sess_cmd_lock);
	WARN_ON_ONCE(!irqs_disabled());
	/*
	 * If command already reached CMD_T_COMPLETE state within
	 * target_complete_cmd(), this se_cmd has been passed to
	 * fabric driver and will not be aborted.
	 *
	 * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
	 * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
	 * long as se_cmd->cmd_kref is still active unless zero.
	 */
	spin_lock(&se_cmd->t_state_lock);
	if (se_cmd->transport_state & CMD_T_COMPLETE) {
		pr_debug("Attempted to abort io tag: %llu already complete,"
			" skipping\n", se_cmd->tag);
		spin_unlock(&se_cmd->t_state_lock);
		return false;
	}
	se_cmd->transport_state |= CMD_T_ABORTED;
	spin_unlock(&se_cmd->t_state_lock);

	return kref_get_unless_zero(&se_cmd->cmd_kref);
}

void core_tmr_abort_task(
	struct se_device *dev,
	struct se_tmr_req *tmr,
	struct se_session *se_sess)
{
143
	struct se_cmd *se_cmd;
144
	unsigned long flags;
145
	u64 ref_tag;
146 147

	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
148
	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
149 150 151

		if (dev != se_cmd->se_dev)
			continue;
152

153 154
		/* skip task management functions, including tmr->task_cmd */
		if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
155 156
			continue;

157
		ref_tag = se_cmd->tag;
158 159 160
		if (tmr->ref_task_tag != ref_tag)
			continue;

161
		printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
162 163
			se_cmd->se_tfo->get_fabric_name(), ref_tag);

164
		if (!__target_check_io_state(se_cmd)) {
165
			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
166
			target_put_sess_cmd(se_cmd);
167 168 169 170 171 172 173 174
			goto out;
		}
		list_del_init(&se_cmd->se_cmd_list);
		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

		cancel_work_sync(&se_cmd->work);
		transport_wait_for_tasks(se_cmd);

175
		transport_cmd_finish_abort(se_cmd, true);
176
		target_put_sess_cmd(se_cmd);
177 178

		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
179
				" ref_tag: %llu\n", ref_tag);
180 181 182 183 184 185
		tmr->response = TMR_FUNCTION_COMPLETE;
		return;
	}
	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

out:
186
	printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %lld\n",
187 188 189 190
			tmr->ref_task_tag);
	tmr->response = TMR_TASK_DOES_NOT_EXIST;
}

/*
 * Abort and release every TMR queued on @dev's dev_tmr_list, except the
 * received LUN_RESET @tmr itself.
 *
 * Pass 1 (under dev->se_tmr_lock): select eligible TMRs, mark their
 * commands CMD_T_ABORTED and take an extra cmd_kref reference, then
 * move them onto a local drain list.  Pass 2 (no locks): quiesce and
 * complete each drained command -- done outside the lock because
 * cancel_work_sync() and transport_wait_for_tasks() may block.
 */
static void core_tmr_drain_tmr_list(
	struct se_device *dev,
	struct se_tmr_req *tmr,
	struct list_head *preempt_and_abort_list)
{
	LIST_HEAD(drain_tmr_list);
	struct se_session *sess;
	struct se_tmr_req *tmr_p, *tmr_pp;
	struct se_cmd *cmd;
	unsigned long flags;
	bool rc;
	/*
	 * Release all pending and outgoing TMRs aside from the received
	 * LUN_RESET tmr..
	 */
	spin_lock_irqsave(&dev->se_tmr_lock, flags);
	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
		/*
		 * Allow the received TMR to return with FUNCTION_COMPLETE.
		 */
		if (tmr_p == tmr)
			continue;

		cmd = tmr_p->task_cmd;
		if (!cmd) {
			pr_err("Unable to locate struct se_cmd for TMR\n");
			continue;
		}
		/*
		 * If this function was called with a valid pr_res_key
		 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action),
		 * skip non registration key matching TMRs.
		 */
		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
			continue;

		sess = cmd->se_sess;
		if (WARN_ON_ONCE(!sess))
			continue;

		/* Lock order: sess_cmd_lock outer, t_state_lock inner. */
		spin_lock(&sess->sess_cmd_lock);
		spin_lock(&cmd->t_state_lock);
		/* Skip commands that never became active on the device. */
		if (!(cmd->transport_state & CMD_T_ACTIVE)) {
			spin_unlock(&cmd->t_state_lock);
			spin_unlock(&sess->sess_cmd_lock);
			continue;
		}
		/* Skip TMRs whose processing is already underway. */
		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
			spin_unlock(&cmd->t_state_lock);
			spin_unlock(&sess->sess_cmd_lock);
			continue;
		}
		cmd->transport_state |= CMD_T_ABORTED;
		spin_unlock(&cmd->t_state_lock);

		/*
		 * Pin the command so it cannot be freed while being drained
		 * in pass 2.  NOTE(review): the message below fires when
		 * kref_get_unless_zero() returned zero (i.e. it FAILED);
		 * its wording is misleading.
		 */
		rc = kref_get_unless_zero(&cmd->cmd_kref);
		spin_unlock(&sess->sess_cmd_lock);
		if (!rc) {
			printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
			continue;
		}
		list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
	}
	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);

	list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
		list_del_init(&tmr_p->tmr_list);
		cmd = tmr_p->task_cmd;

		pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
			" Response: 0x%02x, t_state: %d\n",
			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
			tmr_p->function, tmr_p->response, cmd->t_state);

		/* May block; must run without se_tmr_lock held. */
		cancel_work_sync(&cmd->work);
		transport_wait_for_tasks(cmd);

		transport_cmd_finish_abort(cmd, 1);
		/* Drop the reference taken in pass 1. */
		target_put_sess_cmd(cmd);
	}
}

/*
 * Abort all commands active on @dev's state_list as part of LUN_RESET
 * or PREEMPT_AND_ABORT processing.
 *
 * Pass 1 (under dev->execute_task_lock): claim each eligible command
 * via __target_check_io_state() (sets CMD_T_ABORTED and takes an extra
 * cmd_kref) and move it onto a local drain list.  Pass 2 (no locks):
 * quiesce each command and complete it with TASK ABORTED handling --
 * outside the lock since cancel_work_sync() may block.
 */
static void core_tmr_drain_state_list(
	struct se_device *dev,
	struct se_cmd *prout_cmd,
	struct se_node_acl *tmr_nacl,
	int tas,
	struct list_head *preempt_and_abort_list)
{
	LIST_HEAD(drain_task_list);
	struct se_session *sess;
	struct se_cmd *cmd, *next;
	unsigned long flags;
	int rc;

	/*
	 * Complete outstanding commands with TASK_ABORTED SAM status.
	 *
	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
	 * for TMR LUN_RESET:
	 *
	 * a) "Yes" indicates that each command that is aborted on an I_T nexus
	 * other than the one that caused the SCSI device condition is
	 * completed with TASK ABORTED status, if the TAS bit is set to one in
	 * the Control mode page (see SPC-4). "No" indicates that no status is
	 * returned for aborted commands.
	 *
	 * d) If the logical unit reset is caused by a particular I_T nexus
	 * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
	 * (TASK_ABORTED status) applies.
	 *
	 * Otherwise (e.g., if triggered by a hard reset), "no"
	 * (no TASK_ABORTED SAM status) applies.
	 *
	 * Note that this seems to be independent of TAS (Task Aborted Status)
	 * in the Control Mode Page.
	 */
	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
		/*
		 * For PREEMPT_AND_ABORT usage, only process commands
		 * with a matching reservation key.
		 */
		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
			continue;

		/*
		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
		 */
		if (prout_cmd == cmd)
			continue;

		sess = cmd->se_sess;
		if (WARN_ON_ONCE(!sess))
			continue;

		/* Claim the command: sets CMD_T_ABORTED + extra cmd_kref. */
		spin_lock(&sess->sess_cmd_lock);
		rc = __target_check_io_state(cmd);
		spin_unlock(&sess->sess_cmd_lock);
		if (!rc)
			continue;

		list_move_tail(&cmd->state_list, &drain_task_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);

	while (!list_empty(&drain_task_list)) {
		cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
		list_del_init(&cmd->state_list);

		/*
		 * NOTE(review): the format string below lacks a space before
		 * "cdb:", and the CmdSN field is hard-coded to 0.
		 */
		pr_debug("LUN_RESET: %s cmd: %p"
			" ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
			"cdb: 0x%02x\n",
			(preempt_and_abort_list) ? "Preempt" : "", cmd,
			cmd->tag, 0,
			cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
			cmd->t_task_cdb[0]);
		pr_debug("LUN_RESET: ITT[0x%08llx] - pr_res_key: 0x%016Lx"
			" -- CMD_T_ACTIVE: %d"
			" CMD_T_STOP: %d CMD_T_SENT: %d\n",
			cmd->tag, cmd->pr_res_key,
			(cmd->transport_state & CMD_T_ACTIVE) != 0,
			(cmd->transport_state & CMD_T_STOP) != 0,
			(cmd->transport_state & CMD_T_SENT) != 0);

		/*
		 * If the command may be queued onto a workqueue cancel it now.
		 *
		 * This is equivalent to removal from the execute queue in the
		 * loop above, but we do it down here given that
		 * cancel_work_sync may block.
		 */
		cancel_work_sync(&cmd->work);
		transport_wait_for_tasks(cmd);

		/* Completes the abort, sending TASK ABORTED if TAS applies. */
		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
		/* Drop the reference taken by __target_check_io_state(). */
		target_put_sess_cmd(cmd);
	}
}

int core_tmr_lun_reset(
        struct se_device *dev,
        struct se_tmr_req *tmr,
        struct list_head *preempt_and_abort_list,
        struct se_cmd *prout_cmd)
{
	struct se_node_acl *tmr_nacl = NULL;
	struct se_portal_group *tmr_tpg = NULL;
	int tas;
        /*
	 * TASK_ABORTED status bit, this is configurable via ConfigFS
	 * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
392
	tas = dev->dev_attrib.emulate_tas;
393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411
	/*
	 * Determine if this se_tmr is coming from a $FABRIC_MOD
	 * or struct se_device passthrough..
	 */
	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
		if (tmr_nacl && tmr_tpg) {
			pr_debug("LUN_RESET: TMR caller fabric: %s"
				" initiator port %s\n",
				tmr_tpg->se_tpg_tfo->get_fabric_name(),
				tmr_nacl->initiatorname);
		}
	}
	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
		(preempt_and_abort_list) ? "Preempt" : "TMR",
		dev->transport->name, tas);

	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
412
	core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
413
				preempt_and_abort_list);
414

415 416 417 418
	/*
	 * Clear any legacy SPC-2 reservation when called during
	 * LOGICAL UNIT RESET
	 */
419
	if (!preempt_and_abort_list &&
420
	     (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
421 422
		spin_lock(&dev->dev_reservation_lock);
		dev->dev_reserved_node_acl = NULL;
423
		dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
424
		spin_unlock(&dev->dev_reservation_lock);
425
		pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
426 427
	}

428
	atomic_long_inc(&dev->num_resets);
429

430
	pr_debug("LUN_RESET: %s for [%s] Complete\n",
431
			(preempt_and_abort_list) ? "Preempt" : "TMR",
432
			dev->transport->name);
433 434
	return 0;
}