Commit d5b45dd5 authored by Brian King, committed by Martin K. Petersen

scsi: ibmvfc: Handle move login failure

When service is being performed on an SVC with NPIV enabled, the WWPN
of the canister / node being serviced fails over to the other
canister / node. To the ibmvfc driver this looks like a WWPN moving
from one SCSI ID to another. The driver first attempts an implicit
logout of the old SCSI ID. If this works, we simply delete the rport
at the old location, add an rport at the new location, and the FC
transport class handles everything. However, if there is I/O
outstanding, the implicit logout fails, in which case we send a "move
login" request to the VIOS. This cancels any outstanding I/O to that
port, logs out the port, and PLOGIs the new port. Recently we've
encountered a scenario where the move login itself fails. This
resulted in an attempted PLOGI to the new SCSI ID without the old
SCSI ID having been logged out, which is a VIOS protocol violation.
To solve this, keep tracking the old SCSI ID as the current SCSI ID.
That way, once terminate_rport_io cancels the outstanding I/O, we are
sent back through an implicit logout of the old SCSI ID, rather than
the new one, and can then PLOGI the new SCSI ID.

Link: https://lore.kernel.org/r/1620756740-7045-2-git-send-email-brking@linux.vnet.ibm.com
Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 3ac0fcb4
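
The heart of the change is the bookkeeping around the two SCSI IDs: the
target keeps the old ID as its current scsi_id until the move login
actually succeeds, and the destination is tracked separately in
new_scsi_id. Below is a minimal, standalone sketch of that state
handling; it is not the driver source, and the struct, helper, and
values are simplified stand-ins for the real ibmvfc code.

/*
 * Standalone sketch (not the driver code) of the move-login bookkeeping
 * described above: the old SCSI ID stays current until the move login
 * succeeds; on failure nothing changes, so the retry path can still
 * log out the old ID before PLOGIing the new one.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct tgt {
	uint64_t scsi_id;      /* SCSI ID currently logged in (the old one) */
	uint64_t new_scsi_id;  /* SCSI ID the WWPN has moved to */
};

/* 'success' stands in for status == IBMVFC_MAD_SUCCESS in the real handler. */
static void move_login_done(struct tgt *t, bool success)
{
	if (success) {
		/* Only now does the new SCSI ID become the current one. */
		t->scsi_id = t->new_scsi_id;
		printf("move login ok, current scsi_id=0x%llx\n",
		       (unsigned long long)t->scsi_id);
	} else {
		/*
		 * Keep tracking the old SCSI ID.  Once outstanding I/O is
		 * cancelled (terminate_rport_io in the driver), the retry
		 * path logs out the old ID before PLOGIing the new one.
		 */
		printf("move login failed, still tracking old scsi_id=0x%llx\n",
		       (unsigned long long)t->scsi_id);
	}
}

int main(void)
{
	struct tgt t = { .scsi_id = 0x10000, .new_scsi_id = 0x20000 };

	move_login_done(&t, false); /* failure: old ID remains current */
	move_login_done(&t, true);  /* success: adopt the new ID */
	return 0;
}

Because the current ID is only updated on success, a failed move login
leaves the target looking exactly as it did before the move, so the
existing retry path re-drives the implicit logout of the old ID before
any PLOGI to the new one.
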
@@ -4299,9 +4299,10 @@ static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
         switch (status) {
         case IBMVFC_MAD_SUCCESS:
-                tgt_dbg(tgt, "Move Login succeeded for old scsi_id: %llX\n", tgt->old_scsi_id);
+                tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id);
                 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
                 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
+                tgt->scsi_id = tgt->new_scsi_id;
                 tgt->ids.port_id = tgt->scsi_id;
                 memcpy(&tgt->service_parms, &rsp->service_parms,
                        sizeof(tgt->service_parms));
@@ -4319,8 +4320,8 @@ static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
                 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
                 tgt_log(tgt, level,
-                        "Move Login failed: old scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
-                        tgt->old_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
+                        "Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
+                        tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
                         status);
                 break;
         }
@@ -4357,8 +4358,8 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
         move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
         move->common.length = cpu_to_be16(sizeof(*move));
-        move->old_scsi_id = cpu_to_be64(tgt->old_scsi_id);
-        move->new_scsi_id = cpu_to_be64(tgt->scsi_id);
+        move->old_scsi_id = cpu_to_be64(tgt->scsi_id);
+        move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id);
         move->wwpn = cpu_to_be64(tgt->wwpn);
         move->node_name = cpu_to_be64(tgt->ids.node_name);
@@ -4367,7 +4368,7 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
                 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
                 kref_put(&tgt->kref, ibmvfc_release_tgt);
         } else
-                tgt_dbg(tgt, "Sent Move Login for old scsi_id: %llX\n", tgt->old_scsi_id);
+                tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id);
 }
 
 /**
@@ -4737,8 +4738,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
                          * normal ibmvfc_set_tgt_action to set this, as we
                          * don't normally want to allow this state change.
                          */
-                        wtgt->old_scsi_id = wtgt->scsi_id;
-                        wtgt->scsi_id = scsi_id;
+                        wtgt->new_scsi_id = scsi_id;
                         wtgt->action = IBMVFC_TGT_ACTION_INIT;
                         ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
                         goto unlock_out;
@@ -718,7 +718,7 @@ struct ibmvfc_target {
         struct ibmvfc_host *vhost;
         u64 scsi_id;
         u64 wwpn;
-        u64 old_scsi_id;
+        u64 new_scsi_id;
         struct fc_rport *rport;
         int target_id;
         enum ibmvfc_target_action action;